1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  *
24  * Per memcg lru locking
25  * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26  */
27
28 #include <linux/page_counter.h>
29 #include <linux/memcontrol.h>
30 #include <linux/cgroup.h>
31 #include <linux/pagewalk.h>
32 #include <linux/sched/mm.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/hugetlb.h>
35 #include <linux/pagemap.h>
36 #include <linux/vm_event_item.h>
37 #include <linux/smp.h>
38 #include <linux/page-flags.h>
39 #include <linux/backing-dev.h>
40 #include <linux/bit_spinlock.h>
41 #include <linux/rcupdate.h>
42 #include <linux/limits.h>
43 #include <linux/export.h>
44 #include <linux/mutex.h>
45 #include <linux/rbtree.h>
46 #include <linux/slab.h>
47 #include <linux/swap.h>
48 #include <linux/swapops.h>
49 #include <linux/spinlock.h>
50 #include <linux/eventfd.h>
51 #include <linux/poll.h>
52 #include <linux/sort.h>
53 #include <linux/fs.h>
54 #include <linux/seq_file.h>
55 #include <linux/vmpressure.h>
56 #include <linux/memremap.h>
57 #include <linux/mm_inline.h>
58 #include <linux/swap_cgroup.h>
59 #include <linux/cpu.h>
60 #include <linux/oom.h>
61 #include <linux/lockdep.h>
62 #include <linux/file.h>
63 #include <linux/resume_user_mode.h>
64 #include <linux/psi.h>
65 #include <linux/seq_buf.h>
66 #include <linux/sched/isolation.h>
67 #include "internal.h"
68 #include <net/sock.h>
69 #include <net/ip.h>
70 #include "slab.h"
71 #include "swap.h"
72
73 #include <linux/uaccess.h>
74
75 #include <trace/events/vmscan.h>
76
77 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
78 EXPORT_SYMBOL(memory_cgrp_subsys);
79
80 struct mem_cgroup *root_mem_cgroup __read_mostly;
81
82 /* Active memory cgroup to use from an interrupt context */
83 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
84 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
85
86 /* Socket memory accounting disabled? */
87 static bool cgroup_memory_nosocket __ro_after_init;
88
89 /* Kernel memory accounting disabled? */
90 static bool cgroup_memory_nokmem __ro_after_init;
91
92 /* BPF memory accounting disabled? */
93 static bool cgroup_memory_nobpf __ro_after_init;
94
95 #ifdef CONFIG_CGROUP_WRITEBACK
96 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
97 #endif
98
99 /* Whether legacy memory+swap accounting is active */
100 static bool do_memsw_account(void)
101 {
102         return !cgroup_subsys_on_dfl(memory_cgrp_subsys);
103 }
104
105 #define THRESHOLDS_EVENTS_TARGET 128
106 #define SOFTLIMIT_EVENTS_TARGET 1024
107
108 /*
109  * Cgroups above their limits are maintained in an RB-tree, independent of
110  * their hierarchy representation.
111  */
112
113 struct mem_cgroup_tree_per_node {
114         struct rb_root rb_root;
115         struct rb_node *rb_rightmost;
116         spinlock_t lock;
117 };
118
119 struct mem_cgroup_tree {
120         struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
121 };
122
123 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
124
125 /* for OOM */
126 struct mem_cgroup_eventfd_list {
127         struct list_head list;
128         struct eventfd_ctx *eventfd;
129 };
130
131 /*
132  * cgroup_event represents events which userspace wants to receive.
133  */
134 struct mem_cgroup_event {
135         /*
136          * memcg which the event belongs to.
137          */
138         struct mem_cgroup *memcg;
139         /*
140          * eventfd to signal userspace about the event.
141          */
142         struct eventfd_ctx *eventfd;
143         /*
144          * Each of these is stored in a list by the cgroup.
145          */
146         struct list_head list;
147         /*
148          * register_event() callback will be used to add a new userspace
149          * waiter for changes related to this event.  Use eventfd_signal()
150          * on eventfd to send notification to userspace.
151          */
152         int (*register_event)(struct mem_cgroup *memcg,
153                               struct eventfd_ctx *eventfd, const char *args);
154         /*
155          * unregister_event() callback will be called when userspace closes
156          * the eventfd or when the cgroup is removed.  This callback must be
157          * set if you want to provide notification functionality.
158          */
159         void (*unregister_event)(struct mem_cgroup *memcg,
160                                  struct eventfd_ctx *eventfd);
161         /*
162          * All fields below are needed to unregister the event when
163          * userspace closes the eventfd.
164          */
165         poll_table pt;
166         wait_queue_head_t *wqh;
167         wait_queue_entry_t wait;
168         struct work_struct remove;
169 };
170
171 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
172 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
173
174 /* Stuff for moving charges at task migration. */
175 /*
176  * Types of charges to be moved.
177  */
178 #define MOVE_ANON       0x1U
179 #define MOVE_FILE       0x2U
180 #define MOVE_MASK       (MOVE_ANON | MOVE_FILE)
181
182 /* "mc" and its members are protected by cgroup_mutex */
183 static struct move_charge_struct {
184         spinlock_t        lock; /* for from, to */
185         struct mm_struct  *mm;
186         struct mem_cgroup *from;
187         struct mem_cgroup *to;
188         unsigned long flags;
189         unsigned long precharge;
190         unsigned long moved_charge;
191         unsigned long moved_swap;
192         struct task_struct *moving_task;        /* a task moving charges */
193         wait_queue_head_t waitq;                /* a waitq for other context */
194 } mc = {
195         .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
196         .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
197 };
198
199 /*
200  * Maximum loops in mem_cgroup_soft_reclaim(), used for soft
201  * limit reclaim to prevent infinite loops, if they ever occur.
202  */
203 #define MEM_CGROUP_MAX_RECLAIM_LOOPS            100
204 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
205
206 /* for encoding cft->private value on file */
207 enum res_type {
208         _MEM,
209         _MEMSWAP,
210         _KMEM,
211         _TCP,
212 };
213
214 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
215 #define MEMFILE_TYPE(val)       ((val) >> 16 & 0xffff)
216 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
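
/*
 * Example (illustrative sketch, not part of the original file): cft->private
 * packs a res_type in the upper bits and an attribute index in the low 16
 * bits.  Using a hypothetical attribute value of 2:
 *
 *	unsigned long priv = MEMFILE_PRIVATE(_MEMSWAP, 2);
 *
 *	MEMFILE_TYPE(priv);	// == _MEMSWAP
 *	MEMFILE_ATTR(priv);	// == 2
 */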
217
218 /*
219  * Iteration constructs for visiting all cgroups (under a tree).  If
220  * loops are exited prematurely (break), mem_cgroup_iter_break() must
221  * be used for reference counting.
222  */
223 #define for_each_mem_cgroup_tree(iter, root)            \
224         for (iter = mem_cgroup_iter(root, NULL, NULL);  \
225              iter != NULL;                              \
226              iter = mem_cgroup_iter(root, iter, NULL))
227
228 #define for_each_mem_cgroup(iter)                       \
229         for (iter = mem_cgroup_iter(NULL, NULL, NULL);  \
230              iter != NULL;                              \
231              iter = mem_cgroup_iter(NULL, iter, NULL))
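
/*
 * Usage sketch (illustrative only): an early exit from the walk must go
 * through mem_cgroup_iter_break() so that the reference held on the current
 * position is dropped.  "should_stop" is a hypothetical predicate.
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */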
232
233 static inline bool task_is_dying(void)
234 {
235         return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
236                 (current->flags & PF_EXITING);
237 }
238
239 /* Some nice accessors for the vmpressure. */
240 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
241 {
242         if (!memcg)
243                 memcg = root_mem_cgroup;
244         return &memcg->vmpressure;
245 }
246
247 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
248 {
249         return container_of(vmpr, struct mem_cgroup, vmpressure);
250 }
251
252 #ifdef CONFIG_MEMCG_KMEM
253 static DEFINE_SPINLOCK(objcg_lock);
254
255 bool mem_cgroup_kmem_disabled(void)
256 {
257         return cgroup_memory_nokmem;
258 }
259
260 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
261                                       unsigned int nr_pages);
262
263 static void obj_cgroup_release(struct percpu_ref *ref)
264 {
265         struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
266         unsigned int nr_bytes;
267         unsigned int nr_pages;
268         unsigned long flags;
269
270         /*
271          * At this point all allocated objects are freed, and
272          * objcg->nr_charged_bytes can't have an arbitrary byte value.
273          * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
274          *
275          * The following sequence can lead to it:
276          * 1) CPU0: objcg == stock->cached_objcg
277          * 2) CPU1: we do a small allocation (e.g. 92 bytes),
278          *          PAGE_SIZE bytes are charged
279          * 3) CPU1: a process from another memcg is allocating something,
280          *          the stock is flushed,
281          *          objcg->nr_charged_bytes = PAGE_SIZE - 92
282          * 4) CPU0: we release this object,
283          *          92 bytes are added to stock->nr_bytes
284          * 5) CPU0: stock is flushed,
285          *          92 bytes are added to objcg->nr_charged_bytes
286          *
287          * As a result, nr_charged_bytes == PAGE_SIZE.
288          * This page will be uncharged in obj_cgroup_release().
289          */
290         nr_bytes = atomic_read(&objcg->nr_charged_bytes);
291         WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
292         nr_pages = nr_bytes >> PAGE_SHIFT;
293
294         if (nr_pages)
295                 obj_cgroup_uncharge_pages(objcg, nr_pages);
296
297         spin_lock_irqsave(&objcg_lock, flags);
298         list_del(&objcg->list);
299         spin_unlock_irqrestore(&objcg_lock, flags);
300
301         percpu_ref_exit(ref);
302         kfree_rcu(objcg, rcu);
303 }
304
305 static struct obj_cgroup *obj_cgroup_alloc(void)
306 {
307         struct obj_cgroup *objcg;
308         int ret;
309
310         objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
311         if (!objcg)
312                 return NULL;
313
314         ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
315                               GFP_KERNEL);
316         if (ret) {
317                 kfree(objcg);
318                 return NULL;
319         }
320         INIT_LIST_HEAD(&objcg->list);
321         return objcg;
322 }
323
324 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
325                                   struct mem_cgroup *parent)
326 {
327         struct obj_cgroup *objcg, *iter;
328
329         objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
330
331         spin_lock_irq(&objcg_lock);
332
333         /* 1) Ready to reparent active objcg. */
334         list_add(&objcg->list, &memcg->objcg_list);
335         /* 2) Reparent active objcg and already reparented objcgs to parent. */
336         list_for_each_entry(iter, &memcg->objcg_list, list)
337                 WRITE_ONCE(iter->memcg, parent);
338         /* 3) Move already reparented objcgs to the parent's list */
339         list_splice(&memcg->objcg_list, &parent->objcg_list);
340
341         spin_unlock_irq(&objcg_lock);
342
343         percpu_ref_kill(&objcg->refcnt);
344 }
345
346 /*
347  * A lot of the calls to the cache allocation functions are expected to be
348  * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
349  * conditional on this static branch, we'll have to allow modules that do
350  * kmem_cache_alloc and the like to see this symbol as well.
351  */
352 DEFINE_STATIC_KEY_FALSE(memcg_kmem_online_key);
353 EXPORT_SYMBOL(memcg_kmem_online_key);
354
355 DEFINE_STATIC_KEY_FALSE(memcg_bpf_enabled_key);
356 EXPORT_SYMBOL(memcg_bpf_enabled_key);
357 #endif
358
359 /**
360  * mem_cgroup_css_from_folio - css of the memcg associated with a folio
361  * @folio: folio of interest
362  *
363  * If memcg is bound to the default hierarchy, css of the memcg associated
364  * with @folio is returned.  The returned css remains associated with @folio
365  * until it is released.
366  *
367  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
368  * is returned.
369  */
370 struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio)
371 {
372         struct mem_cgroup *memcg = folio_memcg(folio);
373
374         if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
375                 memcg = root_mem_cgroup;
376
377         return &memcg->css;
378 }
379
380 /**
381  * page_cgroup_ino - return inode number of the memcg a page is charged to
382  * @page: the page
383  *
384  * Look up the closest online ancestor of the memory cgroup @page is charged to
385  * and return its inode number or 0 if @page is not charged to any cgroup. It
386  * is safe to call this function without holding a reference to @page.
387  *
388  * Note, this function is inherently racy, because there is nothing to prevent
389  * the cgroup inode from getting torn down and potentially reallocated a moment
390  * after page_cgroup_ino() returns, so it should only be used by callers that
391  * do not care (such as procfs interfaces).
392  */
393 ino_t page_cgroup_ino(struct page *page)
394 {
395         struct mem_cgroup *memcg;
396         unsigned long ino = 0;
397
398         rcu_read_lock();
399         /* page_folio() is racy here, but the entire function is racy anyway */
400         memcg = folio_memcg_check(page_folio(page));
401
402         while (memcg && !(memcg->css.flags & CSS_ONLINE))
403                 memcg = parent_mem_cgroup(memcg);
404         if (memcg)
405                 ino = cgroup_ino(memcg->css.cgroup);
406         rcu_read_unlock();
407         return ino;
408 }
409
410 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
411                                          struct mem_cgroup_tree_per_node *mctz,
412                                          unsigned long new_usage_in_excess)
413 {
414         struct rb_node **p = &mctz->rb_root.rb_node;
415         struct rb_node *parent = NULL;
416         struct mem_cgroup_per_node *mz_node;
417         bool rightmost = true;
418
419         if (mz->on_tree)
420                 return;
421
422         mz->usage_in_excess = new_usage_in_excess;
423         if (!mz->usage_in_excess)
424                 return;
425         while (*p) {
426                 parent = *p;
427                 mz_node = rb_entry(parent, struct mem_cgroup_per_node,
428                                         tree_node);
429                 if (mz->usage_in_excess < mz_node->usage_in_excess) {
430                         p = &(*p)->rb_left;
431                         rightmost = false;
432                 } else {
433                         p = &(*p)->rb_right;
434                 }
435         }
436
437         if (rightmost)
438                 mctz->rb_rightmost = &mz->tree_node;
439
440         rb_link_node(&mz->tree_node, parent, p);
441         rb_insert_color(&mz->tree_node, &mctz->rb_root);
442         mz->on_tree = true;
443 }
444
445 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
446                                          struct mem_cgroup_tree_per_node *mctz)
447 {
448         if (!mz->on_tree)
449                 return;
450
451         if (&mz->tree_node == mctz->rb_rightmost)
452                 mctz->rb_rightmost = rb_prev(&mz->tree_node);
453
454         rb_erase(&mz->tree_node, &mctz->rb_root);
455         mz->on_tree = false;
456 }
457
458 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
459                                        struct mem_cgroup_tree_per_node *mctz)
460 {
461         unsigned long flags;
462
463         spin_lock_irqsave(&mctz->lock, flags);
464         __mem_cgroup_remove_exceeded(mz, mctz);
465         spin_unlock_irqrestore(&mctz->lock, flags);
466 }
467
468 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
469 {
470         unsigned long nr_pages = page_counter_read(&memcg->memory);
471         unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
472         unsigned long excess = 0;
473
474         if (nr_pages > soft_limit)
475                 excess = nr_pages - soft_limit;
476
477         return excess;
478 }
479
480 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
481 {
482         unsigned long excess;
483         struct mem_cgroup_per_node *mz;
484         struct mem_cgroup_tree_per_node *mctz;
485
486         if (lru_gen_enabled()) {
487                 if (soft_limit_excess(memcg))
488                         lru_gen_soft_reclaim(memcg, nid);
489                 return;
490         }
491
492         mctz = soft_limit_tree.rb_tree_per_node[nid];
493         if (!mctz)
494                 return;
495         /*
496          * Necessary to update all ancestors when hierarchy is used,
497          * because their event counters are not touched.
498          */
499         for (; memcg; memcg = parent_mem_cgroup(memcg)) {
500                 mz = memcg->nodeinfo[nid];
501                 excess = soft_limit_excess(memcg);
502                 /*
503          * We have to update the tree if mz is on the RB-tree or
504          * memcg is over its soft limit.
505                  */
506                 if (excess || mz->on_tree) {
507                         unsigned long flags;
508
509                         spin_lock_irqsave(&mctz->lock, flags);
510                         /* if on-tree, remove it */
511                         if (mz->on_tree)
512                                 __mem_cgroup_remove_exceeded(mz, mctz);
513                         /*
514                          * Insert again. mz->usage_in_excess will be updated.
515                          * If excess is 0, no tree ops.
516                          */
517                         __mem_cgroup_insert_exceeded(mz, mctz, excess);
518                         spin_unlock_irqrestore(&mctz->lock, flags);
519                 }
520         }
521 }
522
523 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
524 {
525         struct mem_cgroup_tree_per_node *mctz;
526         struct mem_cgroup_per_node *mz;
527         int nid;
528
529         for_each_node(nid) {
530                 mz = memcg->nodeinfo[nid];
531                 mctz = soft_limit_tree.rb_tree_per_node[nid];
532                 if (mctz)
533                         mem_cgroup_remove_exceeded(mz, mctz);
534         }
535 }
536
537 static struct mem_cgroup_per_node *
538 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
539 {
540         struct mem_cgroup_per_node *mz;
541
542 retry:
543         mz = NULL;
544         if (!mctz->rb_rightmost)
545                 goto done;              /* Nothing to reclaim from */
546
547         mz = rb_entry(mctz->rb_rightmost,
548                       struct mem_cgroup_per_node, tree_node);
549         /*
550          * Remove the node now but someone else can add it back;
551          * we will add it back at the end of reclaim to its correct
552          * position in the tree.
553          */
554         __mem_cgroup_remove_exceeded(mz, mctz);
555         if (!soft_limit_excess(mz->memcg) ||
556             !css_tryget(&mz->memcg->css))
557                 goto retry;
558 done:
559         return mz;
560 }
561
562 static struct mem_cgroup_per_node *
563 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
564 {
565         struct mem_cgroup_per_node *mz;
566
567         spin_lock_irq(&mctz->lock);
568         mz = __mem_cgroup_largest_soft_limit_node(mctz);
569         spin_unlock_irq(&mctz->lock);
570         return mz;
571 }
572
573 /*
574  * memcg and lruvec stats flushing
575  *
576  * Many codepaths leading to stats update or read are performance sensitive and
577  * adding stats flushing in such codepaths is not desirable. So, to optimize
578  * flushing, the kernel does the following:
579  *
580  * 1) Periodically and asynchronously flush the stats every 2 seconds to not let
581  *    the rstat update tree grow unbounded.
582  *
583  * 2) Flush the stats synchronously on the reader side only when there are more
584  *    than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This optimization can
585  *    let the stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus)
586  *    events, but only for 2 seconds due to (1).
587  */
588 static void flush_memcg_stats_dwork(struct work_struct *w);
589 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
590 static DEFINE_PER_CPU(unsigned int, stats_updates);
591 static atomic_t stats_flush_ongoing = ATOMIC_INIT(0);
592 static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
593 static u64 flush_next_time;
594
595 #define FLUSH_TIME (2UL*HZ)
596
597 /*
598  * Accessors to ensure that preemption is disabled on PREEMPT_RT, because the
599  * stats code can not rely on an acquired spinlock_t to keep preemption
600  * disabled. These functions are never used in hardirq context on PREEMPT_RT
601  * and therefore disabling preemption is sufficient.
602  */
603 static void memcg_stats_lock(void)
604 {
605         preempt_disable_nested();
606         VM_WARN_ON_IRQS_ENABLED();
607 }
608
609 static void __memcg_stats_lock(void)
610 {
611         preempt_disable_nested();
612 }
613
614 static void memcg_stats_unlock(void)
615 {
616         preempt_enable_nested();
617 }
618
619 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
620 {
621         unsigned int x;
622
623         if (!val)
624                 return;
625
626         cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
627
628         x = __this_cpu_add_return(stats_updates, abs(val));
629         if (x > MEMCG_CHARGE_BATCH) {
630                 /*
631                  * If stats_flush_threshold exceeds the threshold
632                  * (>num_online_cpus()), cgroup stats update will be triggered
633                  * in __mem_cgroup_flush_stats(). Increasing this var further
634                  * is redundant and simply adds overhead in atomic update.
635                  */
636                 if (atomic_read(&stats_flush_threshold) <= num_online_cpus())
637                         atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
638                 __this_cpu_write(stats_updates, 0);
639         }
640 }
641
642 static void do_flush_stats(void)
643 {
644         /*
645          * We always flush the entire tree, so concurrent flushers can just
646          * skip. This avoids a thundering herd problem on the rstat global lock
647          * from memcg flushers (e.g. reclaim, refault, etc).
648          */
649         if (atomic_read(&stats_flush_ongoing) ||
650             atomic_xchg(&stats_flush_ongoing, 1))
651                 return;
652
653         WRITE_ONCE(flush_next_time, jiffies_64 + 2*FLUSH_TIME);
654
655         cgroup_rstat_flush(root_mem_cgroup->css.cgroup);
656
657         atomic_set(&stats_flush_threshold, 0);
658         atomic_set(&stats_flush_ongoing, 0);
659 }
660
661 void mem_cgroup_flush_stats(void)
662 {
663         if (atomic_read(&stats_flush_threshold) > num_online_cpus())
664                 do_flush_stats();
665 }
666
667 void mem_cgroup_flush_stats_ratelimited(void)
668 {
669         if (time_after64(jiffies_64, READ_ONCE(flush_next_time)))
670                 mem_cgroup_flush_stats();
671 }
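
/*
 * Reader-side sketch (illustrative only): code that is about to report
 * statistics calls mem_cgroup_flush_stats() (or the ratelimited variant in
 * hot paths) before reading the aggregated counters, e.g.:
 *
 *	mem_cgroup_flush_stats();
 *	nr_file = memcg_page_state(memcg, NR_FILE_PAGES);
 */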
672
673 static void flush_memcg_stats_dwork(struct work_struct *w)
674 {
675         /*
676          * Always flush here so that flushing in latency-sensitive paths is
677          * as cheap as possible.
678          */
679         do_flush_stats();
680         queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
681 }
682
683 /* Subset of vm_event_item to report for memcg event stats */
684 static const unsigned int memcg_vm_event_stat[] = {
685         PGPGIN,
686         PGPGOUT,
687         PGSCAN_KSWAPD,
688         PGSCAN_DIRECT,
689         PGSCAN_KHUGEPAGED,
690         PGSTEAL_KSWAPD,
691         PGSTEAL_DIRECT,
692         PGSTEAL_KHUGEPAGED,
693         PGFAULT,
694         PGMAJFAULT,
695         PGREFILL,
696         PGACTIVATE,
697         PGDEACTIVATE,
698         PGLAZYFREE,
699         PGLAZYFREED,
700 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
701         ZSWPIN,
702         ZSWPOUT,
703 #endif
704 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
705         THP_FAULT_ALLOC,
706         THP_COLLAPSE_ALLOC,
707         THP_SWPOUT,
708         THP_SWPOUT_FALLBACK,
709 #endif
710 };
711
712 #define NR_MEMCG_EVENTS ARRAY_SIZE(memcg_vm_event_stat)
713 static int mem_cgroup_events_index[NR_VM_EVENT_ITEMS] __read_mostly;
714
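/*
 * mem_cgroup_events_index[] maps a vm_event_item to its slot in
 * memcg_vm_event_stat[] above, offset by one so that zero (the static
 * initializer) means "not tracked".  memcg_events_index() undoes the
 * offset and returns -1 for events that memcg does not account.
 */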
715 static void init_memcg_events(void)
716 {
717         int i;
718
719         for (i = 0; i < NR_MEMCG_EVENTS; ++i)
720                 mem_cgroup_events_index[memcg_vm_event_stat[i]] = i + 1;
721 }
722
723 static inline int memcg_events_index(enum vm_event_item idx)
724 {
725         return mem_cgroup_events_index[idx] - 1;
726 }
727
728 struct memcg_vmstats_percpu {
729         /* Local (CPU and cgroup) page state & events */
730         long                    state[MEMCG_NR_STAT];
731         unsigned long           events[NR_MEMCG_EVENTS];
732
733         /* Delta calculation for lockless upward propagation */
734         long                    state_prev[MEMCG_NR_STAT];
735         unsigned long           events_prev[NR_MEMCG_EVENTS];
736
737         /* Cgroup1: threshold notifications & softlimit tree updates */
738         unsigned long           nr_page_events;
739         unsigned long           targets[MEM_CGROUP_NTARGETS];
740 };
741
742 struct memcg_vmstats {
743         /* Aggregated (CPU and subtree) page state & events */
744         long                    state[MEMCG_NR_STAT];
745         unsigned long           events[NR_MEMCG_EVENTS];
746
747         /* Non-hierarchical (CPU aggregated) page state & events */
748         long                    state_local[MEMCG_NR_STAT];
749         unsigned long           events_local[NR_MEMCG_EVENTS];
750
751         /* Pending child counts during tree propagation */
752         long                    state_pending[MEMCG_NR_STAT];
753         unsigned long           events_pending[NR_MEMCG_EVENTS];
754 };
755
756 unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
757 {
758         long x = READ_ONCE(memcg->vmstats->state[idx]);
759 #ifdef CONFIG_SMP
760         if (x < 0)
761                 x = 0;
762 #endif
763         return x;
764 }
765
766 /**
767  * __mod_memcg_state - update cgroup memory statistics
768  * @memcg: the memory cgroup
769  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
770  * @val: delta to add to the counter, can be negative
771  */
772 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
773 {
774         if (mem_cgroup_disabled())
775                 return;
776
777         __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
778         memcg_rstat_updated(memcg, val);
779 }
780
781 /* idx can be of type enum memcg_stat_item or node_stat_item. */
782 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
783 {
784         long x = READ_ONCE(memcg->vmstats->state_local[idx]);
785
786 #ifdef CONFIG_SMP
787         if (x < 0)
788                 x = 0;
789 #endif
790         return x;
791 }
792
793 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
794                               int val)
795 {
796         struct mem_cgroup_per_node *pn;
797         struct mem_cgroup *memcg;
798
799         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
800         memcg = pn->memcg;
801
802         /*
803          * Callers from rmap rely on disabled preemption because they never
804          * update their counters from interrupt context. For those counters we
805          * check that the update is never performed from an interrupt context,
806          * while other callers need to have interrupts disabled.
807          */
808         __memcg_stats_lock();
809         if (IS_ENABLED(CONFIG_DEBUG_VM)) {
810                 switch (idx) {
811                 case NR_ANON_MAPPED:
812                 case NR_FILE_MAPPED:
813                 case NR_ANON_THPS:
814                 case NR_SHMEM_PMDMAPPED:
815                 case NR_FILE_PMDMAPPED:
816                         WARN_ON_ONCE(!in_task());
817                         break;
818                 default:
819                         VM_WARN_ON_IRQS_ENABLED();
820                 }
821         }
822
823         /* Update memcg */
824         __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
825
826         /* Update lruvec */
827         __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
828
829         memcg_rstat_updated(memcg, val);
830         memcg_stats_unlock();
831 }
832
833 /**
834  * __mod_lruvec_state - update lruvec memory statistics
835  * @lruvec: the lruvec
836  * @idx: the stat item
837  * @val: delta to add to the counter, can be negative
838  *
839  * The lruvec is the intersection of the NUMA node and a cgroup. This
840  * function updates all three counters that are affected by a
841  * change of state at this level: per-node, per-cgroup, per-lruvec.
842  */
843 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
844                         int val)
845 {
846         /* Update node */
847         __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
848
849         /* Update memcg and lruvec */
850         if (!mem_cgroup_disabled())
851                 __mod_memcg_lruvec_state(lruvec, idx, val);
852 }
853
854 void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
855                              int val)
856 {
857         struct page *head = compound_head(page); /* rmap on tail pages */
858         struct mem_cgroup *memcg;
859         pg_data_t *pgdat = page_pgdat(page);
860         struct lruvec *lruvec;
861
862         rcu_read_lock();
863         memcg = page_memcg(head);
864         /* Untracked pages have no memcg, no lruvec. Update only the node */
865         if (!memcg) {
866                 rcu_read_unlock();
867                 __mod_node_page_state(pgdat, idx, val);
868                 return;
869         }
870
871         lruvec = mem_cgroup_lruvec(memcg, pgdat);
872         __mod_lruvec_state(lruvec, idx, val);
873         rcu_read_unlock();
874 }
875 EXPORT_SYMBOL(__mod_lruvec_page_state);
876
877 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
878 {
879         pg_data_t *pgdat = page_pgdat(virt_to_page(p));
880         struct mem_cgroup *memcg;
881         struct lruvec *lruvec;
882
883         rcu_read_lock();
884         memcg = mem_cgroup_from_slab_obj(p);
885
886         /*
887          * Untracked pages have no memcg, no lruvec. Update only the
888          * node. If we reparent the slab objects to the root memcg,
889          * when we free the slab object, we need to update the per-memcg
890          * vmstats to keep it correct for the root memcg.
891          */
892         if (!memcg) {
893                 __mod_node_page_state(pgdat, idx, val);
894         } else {
895                 lruvec = mem_cgroup_lruvec(memcg, pgdat);
896                 __mod_lruvec_state(lruvec, idx, val);
897         }
898         rcu_read_unlock();
899 }
900
901 /**
902  * __count_memcg_events - account VM events in a cgroup
903  * @memcg: the memory cgroup
904  * @idx: the event item
905  * @count: the number of events that occurred
906  */
907 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
908                           unsigned long count)
909 {
910         int index = memcg_events_index(idx);
911
912         if (mem_cgroup_disabled() || index < 0)
913                 return;
914
915         memcg_stats_lock();
916         __this_cpu_add(memcg->vmstats_percpu->events[index], count);
917         memcg_rstat_updated(memcg, count);
918         memcg_stats_unlock();
919 }
920
921 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
922 {
923         int index = memcg_events_index(event);
924
925         if (index < 0)
926                 return 0;
927         return READ_ONCE(memcg->vmstats->events[index]);
928 }
929
930 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
931 {
932         int index = memcg_events_index(event);
933
934         if (index < 0)
935                 return 0;
936
937         return READ_ONCE(memcg->vmstats->events_local[index]);
938 }
939
940 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
941                                          int nr_pages)
942 {
943         /* pagein of a big page is an event. So, ignore page size */
944         if (nr_pages > 0)
945                 __count_memcg_events(memcg, PGPGIN, 1);
946         else {
947                 __count_memcg_events(memcg, PGPGOUT, 1);
948                 nr_pages = -nr_pages; /* for event */
949         }
950
951         __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
952 }
953
954 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
955                                        enum mem_cgroup_events_target target)
956 {
957         unsigned long val, next;
958
959         val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
960         next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
961         /* from time_after() in jiffies.h */
962         if ((long)(next - val) < 0) {
963                 switch (target) {
964                 case MEM_CGROUP_TARGET_THRESH:
965                         next = val + THRESHOLDS_EVENTS_TARGET;
966                         break;
967                 case MEM_CGROUP_TARGET_SOFTLIMIT:
968                         next = val + SOFTLIMIT_EVENTS_TARGET;
969                         break;
970                 default:
971                         break;
972                 }
973                 __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
974                 return true;
975         }
976         return false;
977 }
978
979 /*
980  * Check events in order.
981  *
982  */
983 static void memcg_check_events(struct mem_cgroup *memcg, int nid)
984 {
985         if (IS_ENABLED(CONFIG_PREEMPT_RT))
986                 return;
987
988         /* threshold events are triggered at a finer grain than the soft limit */
989         if (unlikely(mem_cgroup_event_ratelimit(memcg,
990                                                 MEM_CGROUP_TARGET_THRESH))) {
991                 bool do_softlimit;
992
993                 do_softlimit = mem_cgroup_event_ratelimit(memcg,
994                                                 MEM_CGROUP_TARGET_SOFTLIMIT);
995                 mem_cgroup_threshold(memcg);
996                 if (unlikely(do_softlimit))
997                         mem_cgroup_update_tree(memcg, nid);
998         }
999 }
1000
1001 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1002 {
1003         /*
1004          * mm_update_next_owner() may clear mm->owner to NULL
1005          * if it races with swapoff, page migration, etc.
1006          * So this can be called with p == NULL.
1007          */
1008         if (unlikely(!p))
1009                 return NULL;
1010
1011         return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1012 }
1013 EXPORT_SYMBOL(mem_cgroup_from_task);
1014
1015 static __always_inline struct mem_cgroup *active_memcg(void)
1016 {
1017         if (!in_task())
1018                 return this_cpu_read(int_active_memcg);
1019         else
1020                 return current->active_memcg;
1021 }
1022
1023 /**
1024  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
1025  * @mm: mm from which memcg should be extracted. It can be NULL.
1026  *
1027  * Obtain a reference on mm->memcg and return it if successful. If mm
1028  * is NULL, then the memcg is chosen as follows:
1029  * 1) The active memcg, if set.
1030  * 2) current->mm->memcg, if available
1031  * 3) root memcg
1032  * If mem_cgroup is disabled, NULL is returned.
1033  */
1034 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1035 {
1036         struct mem_cgroup *memcg;
1037
1038         if (mem_cgroup_disabled())
1039                 return NULL;
1040
1041         /*
1042          * Page cache insertions can happen without an
1043          * actual mm context, e.g. during disk probing
1044          * on boot, loopback IO, acct() writes etc.
1045          *
1046          * No need to css_get on root memcg as the reference
1047          * counting is disabled on the root level in the
1048          * cgroup core. See CSS_NO_REF.
1049          */
1050         if (unlikely(!mm)) {
1051                 memcg = active_memcg();
1052                 if (unlikely(memcg)) {
1053                         /* remote memcg must hold a ref */
1054                         css_get(&memcg->css);
1055                         return memcg;
1056                 }
1057                 mm = current->mm;
1058                 if (unlikely(!mm))
1059                         return root_mem_cgroup;
1060         }
1061
1062         rcu_read_lock();
1063         do {
1064                 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1065                 if (unlikely(!memcg))
1066                         memcg = root_mem_cgroup;
1067         } while (!css_tryget(&memcg->css));
1068         rcu_read_unlock();
1069         return memcg;
1070 }
1071 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
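
/*
 * Caller sketch (illustrative only): the reference obtained here must be
 * dropped with css_put() once the memcg is no longer needed (a no-op for
 * the root memcg, see CSS_NO_REF):
 *
 *	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(mm);
 *
 *	if (memcg) {
 *		... charge against or inspect the memcg ...
 *		css_put(&memcg->css);
 *	}
 */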
1072
1073 static __always_inline bool memcg_kmem_bypass(void)
1074 {
1075         /* Allow remote memcg charging from any context. */
1076         if (unlikely(active_memcg()))
1077                 return false;
1078
1079         /* Memcg to charge can't be determined. */
1080         if (!in_task() || !current->mm || (current->flags & PF_KTHREAD))
1081                 return true;
1082
1083         return false;
1084 }
1085
1086 /**
1087  * mem_cgroup_iter - iterate over memory cgroup hierarchy
1088  * @root: hierarchy root
1089  * @prev: previously returned memcg, NULL on first invocation
1090  * @reclaim: cookie for shared reclaim walks, NULL for full walks
1091  *
1092  * Returns references to children of the hierarchy below @root, or
1093  * @root itself, or %NULL after a full round-trip.
1094  *
1095  * Caller must pass the return value in @prev on subsequent
1096  * invocations for reference counting, or use mem_cgroup_iter_break()
1097  * to cancel a hierarchy walk before the round-trip is complete.
1098  *
1099  * Reclaimers can specify a node in @reclaim to divide up the memcgs
1100  * in the hierarchy among all concurrent reclaimers operating on the
1101  * same node.
1102  */
1103 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1104                                    struct mem_cgroup *prev,
1105                                    struct mem_cgroup_reclaim_cookie *reclaim)
1106 {
1107         struct mem_cgroup_reclaim_iter *iter;
1108         struct cgroup_subsys_state *css = NULL;
1109         struct mem_cgroup *memcg = NULL;
1110         struct mem_cgroup *pos = NULL;
1111
1112         if (mem_cgroup_disabled())
1113                 return NULL;
1114
1115         if (!root)
1116                 root = root_mem_cgroup;
1117
1118         rcu_read_lock();
1119
1120         if (reclaim) {
1121                 struct mem_cgroup_per_node *mz;
1122
1123                 mz = root->nodeinfo[reclaim->pgdat->node_id];
1124                 iter = &mz->iter;
1125
1126                 /*
1127                  * On start, join the current reclaim iteration cycle.
1128                  * Exit when a concurrent walker completes it.
1129                  */
1130                 if (!prev)
1131                         reclaim->generation = iter->generation;
1132                 else if (reclaim->generation != iter->generation)
1133                         goto out_unlock;
1134
1135                 while (1) {
1136                         pos = READ_ONCE(iter->position);
1137                         if (!pos || css_tryget(&pos->css))
1138                                 break;
1139                         /*
1140                          * css reference reached zero, so iter->position will
1141                          * be cleared by ->css_released. However, we should not
1142                          * rely on this happening soon, because ->css_released
1143                          * is called from a work queue, and by busy-waiting we
1144                          * might block it. So we clear iter->position right
1145                          * away.
1146                          */
1147                         (void)cmpxchg(&iter->position, pos, NULL);
1148                 }
1149         } else if (prev) {
1150                 pos = prev;
1151         }
1152
1153         if (pos)
1154                 css = &pos->css;
1155
1156         for (;;) {
1157                 css = css_next_descendant_pre(css, &root->css);
1158                 if (!css) {
1159                         /*
1160                          * Reclaimers share the hierarchy walk, and a
1161                          * new one might jump in right at the end of
1162                          * the hierarchy - make sure they see at least
1163                          * one group and restart from the beginning.
1164                          */
1165                         if (!prev)
1166                                 continue;
1167                         break;
1168                 }
1169
1170                 /*
1171                  * Verify the css and acquire a reference.  The root
1172                  * is provided by the caller, so we know it's alive
1173                  * and kicking, and don't take an extra reference.
1174                  */
1175                 if (css == &root->css || css_tryget(css)) {
1176                         memcg = mem_cgroup_from_css(css);
1177                         break;
1178                 }
1179         }
1180
1181         if (reclaim) {
1182                 /*
1183                  * The position could have already been updated by a competing
1184                  * thread, so check that the value hasn't changed since we read
1185                  * it to avoid reclaiming from the same cgroup twice.
1186                  */
1187                 (void)cmpxchg(&iter->position, pos, memcg);
1188
1189                 if (pos)
1190                         css_put(&pos->css);
1191
1192                 if (!memcg)
1193                         iter->generation++;
1194         }
1195
1196 out_unlock:
1197         rcu_read_unlock();
1198         if (prev && prev != root)
1199                 css_put(&prev->css);
1200
1201         return memcg;
1202 }
1203
1204 /**
1205  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1206  * @root: hierarchy root
1207  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1208  */
1209 void mem_cgroup_iter_break(struct mem_cgroup *root,
1210                            struct mem_cgroup *prev)
1211 {
1212         if (!root)
1213                 root = root_mem_cgroup;
1214         if (prev && prev != root)
1215                 css_put(&prev->css);
1216 }
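
/*
 * Reclaim-side sketch (illustrative only): concurrent reclaimers on the same
 * node can share one hierarchy walk by passing a cookie, roughly as the
 * reclaim code does:
 *
 *	struct mem_cgroup_reclaim_cookie reclaim = { .pgdat = pgdat };
 *	struct mem_cgroup *memcg;
 *
 *	memcg = mem_cgroup_iter(root, NULL, &reclaim);
 *	do {
 *		... shrink the lruvecs of @memcg on this node ...
 *	} while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
 */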
1217
1218 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1219                                         struct mem_cgroup *dead_memcg)
1220 {
1221         struct mem_cgroup_reclaim_iter *iter;
1222         struct mem_cgroup_per_node *mz;
1223         int nid;
1224
1225         for_each_node(nid) {
1226                 mz = from->nodeinfo[nid];
1227                 iter = &mz->iter;
1228                 cmpxchg(&iter->position, dead_memcg, NULL);
1229         }
1230 }
1231
1232 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1233 {
1234         struct mem_cgroup *memcg = dead_memcg;
1235         struct mem_cgroup *last;
1236
1237         do {
1238                 __invalidate_reclaim_iterators(memcg, dead_memcg);
1239                 last = memcg;
1240         } while ((memcg = parent_mem_cgroup(memcg)));
1241
1242         /*
1243          * When cgroup1 non-hierarchy mode is used,
1244          * parent_mem_cgroup() does not walk all the way up to the
1245          * cgroup root (root_mem_cgroup). So we have to handle
1246          * dead_memcg from cgroup root separately.
1247          */
1248         if (!mem_cgroup_is_root(last))
1249                 __invalidate_reclaim_iterators(root_mem_cgroup,
1250                                                 dead_memcg);
1251 }
1252
1253 /**
1254  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1255  * @memcg: hierarchy root
1256  * @fn: function to call for each task
1257  * @arg: argument passed to @fn
1258  *
1259  * This function iterates over tasks attached to @memcg or to any of its
1260  * descendants and calls @fn for each task. If @fn returns a non-zero
1261  * value, the function breaks the iteration loop; otherwise it iterates
1262  * over all tasks.
1263  *
1264  * This function must not be called for the root memory cgroup.
1265  */
1266 void mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1267                            int (*fn)(struct task_struct *, void *), void *arg)
1268 {
1269         struct mem_cgroup *iter;
1270         int ret = 0;
1271
1272         BUG_ON(mem_cgroup_is_root(memcg));
1273
1274         for_each_mem_cgroup_tree(iter, memcg) {
1275                 struct css_task_iter it;
1276                 struct task_struct *task;
1277
1278                 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1279                 while (!ret && (task = css_task_iter_next(&it)))
1280                         ret = fn(task, arg);
1281                 css_task_iter_end(&it);
1282                 if (ret) {
1283                         mem_cgroup_iter_break(memcg, iter);
1284                         break;
1285                 }
1286         }
1287 }
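
/*
 * Callback sketch (illustrative only): @fn returning non-zero aborts the
 * scan.  A hypothetical callback that simply counts the tasks in a hierarchy:
 *
 *	static int count_task(struct task_struct *task, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;	// keep iterating
 *	}
 *
 *	unsigned int nr = 0;
 *	mem_cgroup_scan_tasks(memcg, count_task, &nr);
 */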
1288
1289 #ifdef CONFIG_DEBUG_VM
1290 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1291 {
1292         struct mem_cgroup *memcg;
1293
1294         if (mem_cgroup_disabled())
1295                 return;
1296
1297         memcg = folio_memcg(folio);
1298
1299         if (!memcg)
1300                 VM_BUG_ON_FOLIO(!mem_cgroup_is_root(lruvec_memcg(lruvec)), folio);
1301         else
1302                 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1303 }
1304 #endif
1305
1306 /**
1307  * folio_lruvec_lock - Lock the lruvec for a folio.
1308  * @folio: Pointer to the folio.
1309  *
1310  * These functions are safe to use under any of the following conditions:
1311  * - folio locked
1312  * - folio_test_lru false
1313  * - folio_memcg_lock()
1314  * - folio frozen (refcount of 0)
1315  *
1316  * Return: The lruvec this folio is on with its lock held.
1317  */
1318 struct lruvec *folio_lruvec_lock(struct folio *folio)
1319 {
1320         struct lruvec *lruvec = folio_lruvec(folio);
1321
1322         spin_lock(&lruvec->lru_lock);
1323         lruvec_memcg_debug(lruvec, folio);
1324
1325         return lruvec;
1326 }
1327
1328 /**
1329  * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1330  * @folio: Pointer to the folio.
1331  *
1332  * These functions are safe to use under any of the following conditions:
1333  * - folio locked
1334  * - folio_test_lru false
1335  * - folio_memcg_lock()
1336  * - folio frozen (refcount of 0)
1337  *
1338  * Return: The lruvec this folio is on with its lock held and interrupts
1339  * disabled.
1340  */
1341 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1342 {
1343         struct lruvec *lruvec = folio_lruvec(folio);
1344
1345         spin_lock_irq(&lruvec->lru_lock);
1346         lruvec_memcg_debug(lruvec, folio);
1347
1348         return lruvec;
1349 }
1350
1351 /**
1352  * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1353  * @folio: Pointer to the folio.
1354  * @flags: Pointer to irqsave flags.
1355  *
1356  * These functions are safe to use under any of the following conditions:
1357  * - folio locked
1358  * - folio_test_lru false
1359  * - folio_memcg_lock()
1360  * - folio frozen (refcount of 0)
1361  *
1362  * Return: The lruvec this folio is on with its lock held and interrupts
1363  * disabled.
1364  */
1365 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1366                 unsigned long *flags)
1367 {
1368         struct lruvec *lruvec = folio_lruvec(folio);
1369
1370         spin_lock_irqsave(&lruvec->lru_lock, *flags);
1371         lruvec_memcg_debug(lruvec, folio);
1372
1373         return lruvec;
1374 }
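
/*
 * Locking sketch (illustrative only): a caller that already satisfies one of
 * the conditions above (e.g. holds the folio lock) pairs the irqsave variant
 * with spin_unlock_irqrestore() on the returned lruvec:
 *
 *	unsigned long flags;
 *	struct lruvec *lruvec = folio_lruvec_lock_irqsave(folio, &flags);
 *
 *	... manipulate the folio's LRU state ...
 *
 *	spin_unlock_irqrestore(&lruvec->lru_lock, flags);
 */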
1375
1376 /**
1377  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1378  * @lruvec: mem_cgroup per zone lru vector
1379  * @lru: index of lru list the page is sitting on
1380  * @zid: zone id of the accounted pages
1381  * @nr_pages: positive when adding or negative when removing
1382  *
1383  * This function must be called under lru_lock, just before a page is added
1384  * to or just after a page is removed from an lru list.
1385  */
1386 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1387                                 int zid, int nr_pages)
1388 {
1389         struct mem_cgroup_per_node *mz;
1390         unsigned long *lru_size;
1391         long size;
1392
1393         if (mem_cgroup_disabled())
1394                 return;
1395
1396         mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1397         lru_size = &mz->lru_zone_size[zid][lru];
1398
1399         if (nr_pages < 0)
1400                 *lru_size += nr_pages;
1401
1402         size = *lru_size;
1403         if (WARN_ONCE(size < 0,
1404                 "%s(%p, %d, %d): lru_size %ld\n",
1405                 __func__, lruvec, lru, nr_pages, size)) {
1406                 VM_BUG_ON(1);
1407                 *lru_size = 0;
1408         }
1409
1410         if (nr_pages > 0)
1411                 *lru_size += nr_pages;
1412 }
1413
1414 /**
1415  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1416  * @memcg: the memory cgroup
1417  *
1418  * Returns the maximum amount of memory @memcg can be charged with, in
1419  * pages.
1420  */
1421 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1422 {
1423         unsigned long margin = 0;
1424         unsigned long count;
1425         unsigned long limit;
1426
1427         count = page_counter_read(&memcg->memory);
1428         limit = READ_ONCE(memcg->memory.max);
1429         if (count < limit)
1430                 margin = limit - count;
1431
1432         if (do_memsw_account()) {
1433                 count = page_counter_read(&memcg->memsw);
1434                 limit = READ_ONCE(memcg->memsw.max);
1435                 if (count < limit)
1436                         margin = min(margin, limit - count);
1437                 else
1438                         margin = 0;
1439         }
1440
1441         return margin;
1442 }
1443
1444 /*
1445  * A routine for checking whether "memcg" is under move_account() or not.
1446  *
1447  * Checks whether a cgroup is mc.from or mc.to or under the hierarchy of
1448  * moving cgroups. This is for waiting at high memory pressure
1449  * caused by "move".
1450  */
1451 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1452 {
1453         struct mem_cgroup *from;
1454         struct mem_cgroup *to;
1455         bool ret = false;
1456         /*
1457          * Unlike task_move routines, we access mc.to and mc.from without the
1458          * mutual exclusion of cgroup_mutex. Here, we take the spinlock instead.
1459          */
1460         spin_lock(&mc.lock);
1461         from = mc.from;
1462         to = mc.to;
1463         if (!from)
1464                 goto unlock;
1465
1466         ret = mem_cgroup_is_descendant(from, memcg) ||
1467                 mem_cgroup_is_descendant(to, memcg);
1468 unlock:
1469         spin_unlock(&mc.lock);
1470         return ret;
1471 }
1472
1473 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1474 {
1475         if (mc.moving_task && current != mc.moving_task) {
1476                 if (mem_cgroup_under_move(memcg)) {
1477                         DEFINE_WAIT(wait);
1478                         prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1479                         /* moving charge context might have finished. */
1480                         if (mc.moving_task)
1481                                 schedule();
1482                         finish_wait(&mc.waitq, &wait);
1483                         return true;
1484                 }
1485         }
1486         return false;
1487 }
1488
1489 struct memory_stat {
1490         const char *name;
1491         unsigned int idx;
1492 };
1493
1494 static const struct memory_stat memory_stats[] = {
1495         { "anon",                       NR_ANON_MAPPED                  },
1496         { "file",                       NR_FILE_PAGES                   },
1497         { "kernel",                     MEMCG_KMEM                      },
1498         { "kernel_stack",               NR_KERNEL_STACK_KB              },
1499         { "pagetables",                 NR_PAGETABLE                    },
1500         { "sec_pagetables",             NR_SECONDARY_PAGETABLE          },
1501         { "percpu",                     MEMCG_PERCPU_B                  },
1502         { "sock",                       MEMCG_SOCK                      },
1503         { "vmalloc",                    MEMCG_VMALLOC                   },
1504         { "shmem",                      NR_SHMEM                        },
1505 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
1506         { "zswap",                      MEMCG_ZSWAP_B                   },
1507         { "zswapped",                   MEMCG_ZSWAPPED                  },
1508 #endif
1509         { "file_mapped",                NR_FILE_MAPPED                  },
1510         { "file_dirty",                 NR_FILE_DIRTY                   },
1511         { "file_writeback",             NR_WRITEBACK                    },
1512 #ifdef CONFIG_SWAP
1513         { "swapcached",                 NR_SWAPCACHE                    },
1514 #endif
1515 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1516         { "anon_thp",                   NR_ANON_THPS                    },
1517         { "file_thp",                   NR_FILE_THPS                    },
1518         { "shmem_thp",                  NR_SHMEM_THPS                   },
1519 #endif
1520         { "inactive_anon",              NR_INACTIVE_ANON                },
1521         { "active_anon",                NR_ACTIVE_ANON                  },
1522         { "inactive_file",              NR_INACTIVE_FILE                },
1523         { "active_file",                NR_ACTIVE_FILE                  },
1524         { "unevictable",                NR_UNEVICTABLE                  },
1525         { "slab_reclaimable",           NR_SLAB_RECLAIMABLE_B           },
1526         { "slab_unreclaimable",         NR_SLAB_UNRECLAIMABLE_B         },
1527
1528         /* The memory events */
1529         { "workingset_refault_anon",    WORKINGSET_REFAULT_ANON         },
1530         { "workingset_refault_file",    WORKINGSET_REFAULT_FILE         },
1531         { "workingset_activate_anon",   WORKINGSET_ACTIVATE_ANON        },
1532         { "workingset_activate_file",   WORKINGSET_ACTIVATE_FILE        },
1533         { "workingset_restore_anon",    WORKINGSET_RESTORE_ANON         },
1534         { "workingset_restore_file",    WORKINGSET_RESTORE_FILE         },
1535         { "workingset_nodereclaim",     WORKINGSET_NODERECLAIM          },
1536 };
1537
1538 /* Translate stat items to the correct unit for memory.stat output */
1539 static int memcg_page_state_unit(int item)
1540 {
1541         switch (item) {
1542         case MEMCG_PERCPU_B:
1543         case MEMCG_ZSWAP_B:
1544         case NR_SLAB_RECLAIMABLE_B:
1545         case NR_SLAB_UNRECLAIMABLE_B:
1546         case WORKINGSET_REFAULT_ANON:
1547         case WORKINGSET_REFAULT_FILE:
1548         case WORKINGSET_ACTIVATE_ANON:
1549         case WORKINGSET_ACTIVATE_FILE:
1550         case WORKINGSET_RESTORE_ANON:
1551         case WORKINGSET_RESTORE_FILE:
1552         case WORKINGSET_NODERECLAIM:
1553                 return 1;
1554         case NR_KERNEL_STACK_KB:
1555                 return SZ_1K;
1556         default:
1557                 return PAGE_SIZE;
1558         }
1559 }
1560
1561 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1562                                                     int item)
1563 {
1564         return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
1565 }
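
/*
 * Illustrative example (not in the original source): the two helpers above
 * normalise every item to bytes for memory.stat.  A raw NR_KERNEL_STACK_KB
 * value of 256 is reported as 256 * SZ_1K = 262144 bytes, while a page-based
 * item such as NR_SHMEM with a raw value of 10 is reported as
 * 10 * PAGE_SIZE = 40960 bytes on a 4 KiB page system.  Items that are
 * already byte counts (the *_B items) and the workingset event counters use
 * a unit of 1 and are passed through unchanged.
 */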
1566
1567 static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1568 {
1569         int i;
1570
1571         /*
1572          * Provide statistics on the state of the memory subsystem as
1573          * well as cumulative event counters that show past behavior.
1574          *
1575          * This list is ordered following a combination of these gradients:
1576          * 1) generic big picture -> specifics and details
1577          * 2) reflecting userspace activity -> reflecting kernel heuristics
1578          *
1579          * Current memory state:
1580          */
1581         mem_cgroup_flush_stats();
1582
1583         for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1584                 u64 size;
1585
1586                 size = memcg_page_state_output(memcg, memory_stats[i].idx);
1587                 seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size);
1588
1589                 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1590                         size += memcg_page_state_output(memcg,
1591                                                         NR_SLAB_RECLAIMABLE_B);
1592                         seq_buf_printf(s, "slab %llu\n", size);
1593                 }
1594         }
1595
1596         /* Accumulated memory events */
1597         seq_buf_printf(s, "pgscan %lu\n",
1598                        memcg_events(memcg, PGSCAN_KSWAPD) +
1599                        memcg_events(memcg, PGSCAN_DIRECT) +
1600                        memcg_events(memcg, PGSCAN_KHUGEPAGED));
1601         seq_buf_printf(s, "pgsteal %lu\n",
1602                        memcg_events(memcg, PGSTEAL_KSWAPD) +
1603                        memcg_events(memcg, PGSTEAL_DIRECT) +
1604                        memcg_events(memcg, PGSTEAL_KHUGEPAGED));
1605
1606         for (i = 0; i < ARRAY_SIZE(memcg_vm_event_stat); i++) {
1607                 if (memcg_vm_event_stat[i] == PGPGIN ||
1608                     memcg_vm_event_stat[i] == PGPGOUT)
1609                         continue;
1610
1611                 seq_buf_printf(s, "%s %lu\n",
1612                                vm_event_name(memcg_vm_event_stat[i]),
1613                                memcg_events(memcg, memcg_vm_event_stat[i]));
1614         }
1615
1616         /* The above should easily fit into one page */
1617         WARN_ON_ONCE(seq_buf_has_overflowed(s));
1618 }
1619
1620 static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s);
1621
1622 static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
1623 {
1624         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1625                 memcg_stat_format(memcg, s);
1626         else
1627                 memcg1_stat_format(memcg, s);
1628         WARN_ON_ONCE(seq_buf_has_overflowed(s));
1629 }
1630
1631 /**
1632  * mem_cgroup_print_oom_context: Print OOM information relevant to
1633  * memory controller.
1634  * @memcg: The memory cgroup that went over limit
1635  * @p: Task that is going to be killed
1636  *
1637  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1638  * enabled
1639  */
1640 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1641 {
1642         rcu_read_lock();
1643
1644         if (memcg) {
1645                 pr_cont(",oom_memcg=");
1646                 pr_cont_cgroup_path(memcg->css.cgroup);
1647         } else
1648                 pr_cont(",global_oom");
1649         if (p) {
1650                 pr_cont(",task_memcg=");
1651                 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1652         }
1653         rcu_read_unlock();
1654 }
1655
1656 /**
1657  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1658  * memory controller.
1659  * @memcg: The memory cgroup that went over limit
1660  */
1661 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1662 {
1663         /* Use a static buffer, as the caller is holding oom_lock. */
1664         static char buf[PAGE_SIZE];
1665         struct seq_buf s;
1666
1667         lockdep_assert_held(&oom_lock);
1668
1669         pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1670                 K((u64)page_counter_read(&memcg->memory)),
1671                 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1672         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1673                 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1674                         K((u64)page_counter_read(&memcg->swap)),
1675                         K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1676         else {
1677                 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1678                         K((u64)page_counter_read(&memcg->memsw)),
1679                         K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1680                 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1681                         K((u64)page_counter_read(&memcg->kmem)),
1682                         K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1683         }
1684
1685         pr_info("Memory cgroup stats for ");
1686         pr_cont_cgroup_path(memcg->css.cgroup);
1687         pr_cont(":");
1688         seq_buf_init(&s, buf, sizeof(buf));
1689         memory_stat_format(memcg, &s);
1690         seq_buf_do_printk(&s, KERN_INFO);
1691 }
1692
1693 /*
1694  * Return the memory (and swap, if configured) limit for a memcg.
1695  */
1696 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1697 {
1698         unsigned long max = READ_ONCE(memcg->memory.max);
1699
1700         if (do_memsw_account()) {
1701                 if (mem_cgroup_swappiness(memcg)) {
1702                         /* Calculate swap excess capacity from memsw limit */
1703                         unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1704
1705                         max += min(swap, (unsigned long)total_swap_pages);
1706                 }
1707         } else {
1708                 if (mem_cgroup_swappiness(memcg))
1709                         max += min(READ_ONCE(memcg->swap.max),
1710                                    (unsigned long)total_swap_pages);
1711         }
1712         return max;
1713 }
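
/*
 * Worked example (not in the original source), assuming cgroup v2 so that
 * do_memsw_account() is false, non-zero swappiness, memory.max == 262144
 * pages (1 GiB with 4 KiB pages), an unlimited swap.max (PAGE_COUNTER_MAX)
 * and total_swap_pages == 131072 pages (512 MiB):
 *
 *	max = 262144 + min(PAGE_COUNTER_MAX, 131072) = 393216 pages (1.5 GiB)
 *
 * i.e. the limit is the memory ceiling plus however much swap the cgroup
 * could realistically consume.
 */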
1714
1715 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1716 {
1717         return page_counter_read(&memcg->memory);
1718 }
1719
1720 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1721                                      int order)
1722 {
1723         struct oom_control oc = {
1724                 .zonelist = NULL,
1725                 .nodemask = NULL,
1726                 .memcg = memcg,
1727                 .gfp_mask = gfp_mask,
1728                 .order = order,
1729         };
1730         bool ret = true;
1731
1732         if (mutex_lock_killable(&oom_lock))
1733                 return true;
1734
1735         if (mem_cgroup_margin(memcg) >= (1 << order))
1736                 goto unlock;
1737
1738         /*
1739          * A few threads that were not waiting at mutex_lock_killable() can
1740          * fail to bail out. Therefore, check again while holding oom_lock.
1741          */
1742         ret = task_is_dying() || out_of_memory(&oc);
1743
1744 unlock:
1745         mutex_unlock(&oom_lock);
1746         return ret;
1747 }
1748
1749 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1750                                    pg_data_t *pgdat,
1751                                    gfp_t gfp_mask,
1752                                    unsigned long *total_scanned)
1753 {
1754         struct mem_cgroup *victim = NULL;
1755         int total = 0;
1756         int loop = 0;
1757         unsigned long excess;
1758         unsigned long nr_scanned;
1759         struct mem_cgroup_reclaim_cookie reclaim = {
1760                 .pgdat = pgdat,
1761         };
1762
1763         excess = soft_limit_excess(root_memcg);
1764
1765         while (1) {
1766                 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1767                 if (!victim) {
1768                         loop++;
1769                         if (loop >= 2) {
1770                                 /*
1771                                  * If we have not been able to reclaim
1772                                  * anything, it might be because there are
1773                                  * no reclaimable pages under this hierarchy.
1774                                  */
1775                                 if (!total)
1776                                         break;
1777                                 /*
1778                                  * We want to do more targeted reclaim.
1779                                  * excess >> 2 is not too excessive, so we don't
1780                                  * reclaim too much, nor too little, which would keep
1781                                  * us coming back to reclaim from this cgroup.
1782                                  */
1783                                 if (total >= (excess >> 2) ||
1784                                         (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1785                                         break;
1786                         }
1787                         continue;
1788                 }
1789                 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1790                                         pgdat, &nr_scanned);
1791                 *total_scanned += nr_scanned;
1792                 if (!soft_limit_excess(root_memcg))
1793                         break;
1794         }
1795         mem_cgroup_iter_break(root_memcg, victim);
1796         return total;
1797 }
1798
1799 #ifdef CONFIG_LOCKDEP
1800 static struct lockdep_map memcg_oom_lock_dep_map = {
1801         .name = "memcg_oom_lock",
1802 };
1803 #endif
1804
1805 static DEFINE_SPINLOCK(memcg_oom_lock);
1806
1807 /*
1808  * Check whether the OOM killer is already running under our hierarchy.
1809  * If someone else is already running it, return false.
1810  */
1811 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1812 {
1813         struct mem_cgroup *iter, *failed = NULL;
1814
1815         spin_lock(&memcg_oom_lock);
1816
1817         for_each_mem_cgroup_tree(iter, memcg) {
1818                 if (iter->oom_lock) {
1819                         /*
1820                          * This subtree of our hierarchy is already locked,
1821                          * so we cannot grant the lock.
1822                          */
1823                         failed = iter;
1824                         mem_cgroup_iter_break(memcg, iter);
1825                         break;
1826                 } else
1827                         iter->oom_lock = true;
1828         }
1829
1830         if (failed) {
1831                 /*
1832                  * OK, we failed to lock the whole subtree, so we have
1833                  * to undo what we set up, up to the failing subtree.
1834                  */
1835                 for_each_mem_cgroup_tree(iter, memcg) {
1836                         if (iter == failed) {
1837                                 mem_cgroup_iter_break(memcg, iter);
1838                                 break;
1839                         }
1840                         iter->oom_lock = false;
1841                 }
1842         } else
1843                 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1844
1845         spin_unlock(&memcg_oom_lock);
1846
1847         return !failed;
1848 }
1849
1850 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1851 {
1852         struct mem_cgroup *iter;
1853
1854         spin_lock(&memcg_oom_lock);
1855         mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1856         for_each_mem_cgroup_tree(iter, memcg)
1857                 iter->oom_lock = false;
1858         spin_unlock(&memcg_oom_lock);
1859 }
1860
1861 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1862 {
1863         struct mem_cgroup *iter;
1864
1865         spin_lock(&memcg_oom_lock);
1866         for_each_mem_cgroup_tree(iter, memcg)
1867                 iter->under_oom++;
1868         spin_unlock(&memcg_oom_lock);
1869 }
1870
1871 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1872 {
1873         struct mem_cgroup *iter;
1874
1875         /*
1876          * Be careful about under_oom underflows because a child memcg
1877          * could have been added after mem_cgroup_mark_under_oom.
1878          */
1879         spin_lock(&memcg_oom_lock);
1880         for_each_mem_cgroup_tree(iter, memcg)
1881                 if (iter->under_oom > 0)
1882                         iter->under_oom--;
1883         spin_unlock(&memcg_oom_lock);
1884 }
1885
1886 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1887
1888 struct oom_wait_info {
1889         struct mem_cgroup *memcg;
1890         wait_queue_entry_t      wait;
1891 };
1892
1893 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1894         unsigned mode, int sync, void *arg)
1895 {
1896         struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1897         struct mem_cgroup *oom_wait_memcg;
1898         struct oom_wait_info *oom_wait_info;
1899
1900         oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1901         oom_wait_memcg = oom_wait_info->memcg;
1902
1903         if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1904             !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1905                 return 0;
1906         return autoremove_wake_function(wait, mode, sync, arg);
1907 }
1908
1909 static void memcg_oom_recover(struct mem_cgroup *memcg)
1910 {
1911         /*
1912          * For the following lockless ->under_oom test, the only required
1913          * guarantee is that it must see the state asserted by an OOM when
1914          * this function is called as a result of userland actions
1915          * triggered by the notification of the OOM.  This is trivially
1916          * achieved by invoking mem_cgroup_mark_under_oom() before
1917          * triggering notification.
1918          */
1919         if (memcg && memcg->under_oom)
1920                 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1921 }
1922
1923 /*
1924  * Returns true if one or more processes were successfully killed, though in
1925  * some corner cases it can return true even without killing any process.
1926  */
1927 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1928 {
1929         bool locked, ret;
1930
1931         if (order > PAGE_ALLOC_COSTLY_ORDER)
1932                 return false;
1933
1934         memcg_memory_event(memcg, MEMCG_OOM);
1935
1936         /*
1937          * We are in the middle of the charge context here, so we
1938          * don't want to block when potentially sitting on a callstack
1939          * that holds all kinds of filesystem and mm locks.
1940          *
1941          * cgroup1 allows disabling the OOM killer and waiting for outside
1942          * handling until the charge can succeed; remember the context and put
1943          * the task to sleep at the end of the page fault when all locks are
1944          * released.
1945          *
1946          * On the other hand, in-kernel OOM killer allows for an async victim
1947          * memory reclaim (oom_reaper) and that means that we are not solely
1948          * relying on the oom victim to make a forward progress and we can
1949          * invoke the oom killer here.
1950          *
1951          * Please note that mem_cgroup_out_of_memory might fail to find a
1952          * victim and then we have to bail out from the charge path.
1953          */
1954         if (READ_ONCE(memcg->oom_kill_disable)) {
1955                 if (current->in_user_fault) {
1956                         css_get(&memcg->css);
1957                         current->memcg_in_oom = memcg;
1958                         current->memcg_oom_gfp_mask = mask;
1959                         current->memcg_oom_order = order;
1960                 }
1961                 return false;
1962         }
1963
1964         mem_cgroup_mark_under_oom(memcg);
1965
1966         locked = mem_cgroup_oom_trylock(memcg);
1967
1968         if (locked)
1969                 mem_cgroup_oom_notify(memcg);
1970
1971         mem_cgroup_unmark_under_oom(memcg);
1972         ret = mem_cgroup_out_of_memory(memcg, mask, order);
1973
1974         if (locked)
1975                 mem_cgroup_oom_unlock(memcg);
1976
1977         return ret;
1978 }
1979
1980 /**
1981  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1982  * @handle: actually kill/wait or just clean up the OOM state
1983  *
1984  * This has to be called at the end of a page fault if the memcg OOM
1985  * handler was enabled.
1986  *
1987  * Memcg supports userspace OOM handling where failed allocations must
1988  * sleep on a waitqueue until the userspace task resolves the
1989  * situation.  Sleeping directly in the charge context with all kinds
1990  * of locks held is not a good idea, instead we remember an OOM state
1991  * in the task and mem_cgroup_oom_synchronize() has to be called at
1992  * the end of the page fault to complete the OOM handling.
1993  *
1994  * Returns %true if an ongoing memcg OOM situation was detected and
1995  * completed, %false otherwise.
1996  */
1997 bool mem_cgroup_oom_synchronize(bool handle)
1998 {
1999         struct mem_cgroup *memcg = current->memcg_in_oom;
2000         struct oom_wait_info owait;
2001         bool locked;
2002
2003         /* OOM is global, do not handle */
2004         if (!memcg)
2005                 return false;
2006
2007         if (!handle)
2008                 goto cleanup;
2009
2010         owait.memcg = memcg;
2011         owait.wait.flags = 0;
2012         owait.wait.func = memcg_oom_wake_function;
2013         owait.wait.private = current;
2014         INIT_LIST_HEAD(&owait.wait.entry);
2015
2016         prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
2017         mem_cgroup_mark_under_oom(memcg);
2018
2019         locked = mem_cgroup_oom_trylock(memcg);
2020
2021         if (locked)
2022                 mem_cgroup_oom_notify(memcg);
2023
2024         schedule();
2025         mem_cgroup_unmark_under_oom(memcg);
2026         finish_wait(&memcg_oom_waitq, &owait.wait);
2027
2028         if (locked)
2029                 mem_cgroup_oom_unlock(memcg);
2030 cleanup:
2031         current->memcg_in_oom = NULL;
2032         css_put(&memcg->css);
2033         return true;
2034 }
2035
2036 /**
2037  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
2038  * @victim: task to be killed by the OOM killer
2039  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
2040  *
2041  * Returns a pointer to a memory cgroup, which has to be cleaned up
2042  * by killing all belonging OOM-killable tasks.
2043  *
2044  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
2045  */
2046 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
2047                                             struct mem_cgroup *oom_domain)
2048 {
2049         struct mem_cgroup *oom_group = NULL;
2050         struct mem_cgroup *memcg;
2051
2052         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2053                 return NULL;
2054
2055         if (!oom_domain)
2056                 oom_domain = root_mem_cgroup;
2057
2058         rcu_read_lock();
2059
2060         memcg = mem_cgroup_from_task(victim);
2061         if (mem_cgroup_is_root(memcg))
2062                 goto out;
2063
2064         /*
2065          * If the victim task has been asynchronously moved to a different
2066          * memory cgroup, we might end up killing tasks outside oom_domain.
2067          * In this case it's better to ignore memory.group.oom.
2068          */
2069         if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
2070                 goto out;
2071
2072         /*
2073          * Traverse the memory cgroup hierarchy from the victim task's
2074          * cgroup up to the OOMing cgroup (or root) to find the
2075          * highest-level memory cgroup with oom.group set.
2076          */
2077         for (; memcg; memcg = parent_mem_cgroup(memcg)) {
2078                 if (READ_ONCE(memcg->oom_group))
2079                         oom_group = memcg;
2080
2081                 if (memcg == oom_domain)
2082                         break;
2083         }
2084
2085         if (oom_group)
2086                 css_get(&oom_group->css);
2087 out:
2088         rcu_read_unlock();
2089
2090         return oom_group;
2091 }
2092
2093 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2094 {
2095         pr_info("Tasks in ");
2096         pr_cont_cgroup_path(memcg->css.cgroup);
2097         pr_cont(" are going to be killed due to memory.oom.group set\n");
2098 }
2099
2100 /**
2101  * folio_memcg_lock - Bind a folio to its memcg.
2102  * @folio: The folio.
2103  *
2104  * This function prevents unlocked LRU folios from being moved to
2105  * another cgroup.
2106  *
2107  * It ensures lifetime of the bound memcg.  The caller is responsible
2108  * for the lifetime of the folio.
2109  */
2110 void folio_memcg_lock(struct folio *folio)
2111 {
2112         struct mem_cgroup *memcg;
2113         unsigned long flags;
2114
2115         /*
2116          * The RCU lock is held throughout the transaction.  The fast
2117          * path can get away without acquiring the memcg->move_lock
2118          * because page moving starts with an RCU grace period.
2119          */
2120         rcu_read_lock();
2121
2122         if (mem_cgroup_disabled())
2123                 return;
2124 again:
2125         memcg = folio_memcg(folio);
2126         if (unlikely(!memcg))
2127                 return;
2128
2129 #ifdef CONFIG_PROVE_LOCKING
2130         local_irq_save(flags);
2131         might_lock(&memcg->move_lock);
2132         local_irq_restore(flags);
2133 #endif
2134
2135         if (atomic_read(&memcg->moving_account) <= 0)
2136                 return;
2137
2138         spin_lock_irqsave(&memcg->move_lock, flags);
2139         if (memcg != folio_memcg(folio)) {
2140                 spin_unlock_irqrestore(&memcg->move_lock, flags);
2141                 goto again;
2142         }
2143
2144         /*
2145          * When charge migration first begins, we can have multiple
2146          * critical sections holding the fast-path RCU lock and one
2147          * holding the slowpath move_lock. Track the task that has the
2148          * move_lock for folio_memcg_unlock().
2149          */
2150         memcg->move_lock_task = current;
2151         memcg->move_lock_flags = flags;
2152 }
2153
2154 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2155 {
2156         if (memcg && memcg->move_lock_task == current) {
2157                 unsigned long flags = memcg->move_lock_flags;
2158
2159                 memcg->move_lock_task = NULL;
2160                 memcg->move_lock_flags = 0;
2161
2162                 spin_unlock_irqrestore(&memcg->move_lock, flags);
2163         }
2164
2165         rcu_read_unlock();
2166 }
2167
2168 /**
2169  * folio_memcg_unlock - Release the binding between a folio and its memcg.
2170  * @folio: The folio.
2171  *
2172  * This releases the binding created by folio_memcg_lock().  This does
2173  * not change the accounting of this folio to its memcg, but it does
2174  * permit others to change it.
2175  */
2176 void folio_memcg_unlock(struct folio *folio)
2177 {
2178         __folio_memcg_unlock(folio_memcg(folio));
2179 }
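
/*
 * Minimal usage sketch (not in the original source) of the binding helpers
 * above; example_update_under_memcg_lock() is a hypothetical caller, and the
 * update in the middle is a placeholder for whatever folio state and memcg
 * statistics a real user (e.g. dirty accounting) would modify while the
 * folio cannot be moved to another cgroup.
 */
static inline void example_update_under_memcg_lock(struct folio *folio)
{
        folio_memcg_lock(folio);
        /* ... update folio state and the owning memcg's counters ... */
        folio_memcg_unlock(folio);
}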
2180
2181 struct memcg_stock_pcp {
2182         local_lock_t stock_lock;
2183         struct mem_cgroup *cached; /* this is never the root cgroup */
2184         unsigned int nr_pages;
2185
2186 #ifdef CONFIG_MEMCG_KMEM
2187         struct obj_cgroup *cached_objcg;
2188         struct pglist_data *cached_pgdat;
2189         unsigned int nr_bytes;
2190         int nr_slab_reclaimable_b;
2191         int nr_slab_unreclaimable_b;
2192 #endif
2193
2194         struct work_struct work;
2195         unsigned long flags;
2196 #define FLUSHING_CACHED_CHARGE  0
2197 };
2198 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2199         .stock_lock = INIT_LOCAL_LOCK(stock_lock),
2200 };
2201 static DEFINE_MUTEX(percpu_charge_mutex);
2202
2203 #ifdef CONFIG_MEMCG_KMEM
2204 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2205 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2206                                      struct mem_cgroup *root_memcg);
2207 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2208
2209 #else
2210 static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2211 {
2212         return NULL;
2213 }
2214 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2215                                      struct mem_cgroup *root_memcg)
2216 {
2217         return false;
2218 }
2219 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2220 {
2221 }
2222 #endif
2223
2224 /**
2225  * consume_stock: Try to consume stocked charge on this cpu.
2226  * @memcg: memcg to consume from.
2227  * @nr_pages: how many pages to charge.
2228  *
2229  * The charges will only happen if @memcg matches the current cpu's memcg
2230  * The charges will only happen if @memcg matches the current cpu's memcg
2231  * stock, and at least @nr_pages are available in that stock.  When the stock
2232  * cannot service an allocation, the slow path refills it afterwards.
2233  * returns true if successful, false otherwise.
2234  */
2235 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2236 {
2237         struct memcg_stock_pcp *stock;
2238         unsigned long flags;
2239         bool ret = false;
2240
2241         if (nr_pages > MEMCG_CHARGE_BATCH)
2242                 return ret;
2243
2244         local_lock_irqsave(&memcg_stock.stock_lock, flags);
2245
2246         stock = this_cpu_ptr(&memcg_stock);
2247         if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) {
2248                 stock->nr_pages -= nr_pages;
2249                 ret = true;
2250         }
2251
2252         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2253
2254         return ret;
2255 }
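
/*
 * Minimal sketch (not in the original source) of how the stock is meant to
 * be used by a charge path; try_charge_memcg() further down is the real
 * consumer:
 *
 *	if (consume_stock(memcg, nr_pages))
 *		return 0;		<- fast path: served from this CPU's stock
 *
 *	slow path: page_counter_try_charge() a full MEMCG_CHARGE_BATCH, then
 *	refill_stock() with the surplus so the next charge hits the fast path.
 */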
2256
2257 /*
2258  * Return the cached charges to the page counters and reset cached information.
2259  */
2260 static void drain_stock(struct memcg_stock_pcp *stock)
2261 {
2262         struct mem_cgroup *old = READ_ONCE(stock->cached);
2263
2264         if (!old)
2265                 return;
2266
2267         if (stock->nr_pages) {
2268                 page_counter_uncharge(&old->memory, stock->nr_pages);
2269                 if (do_memsw_account())
2270                         page_counter_uncharge(&old->memsw, stock->nr_pages);
2271                 stock->nr_pages = 0;
2272         }
2273
2274         css_put(&old->css);
2275         WRITE_ONCE(stock->cached, NULL);
2276 }
2277
2278 static void drain_local_stock(struct work_struct *dummy)
2279 {
2280         struct memcg_stock_pcp *stock;
2281         struct obj_cgroup *old = NULL;
2282         unsigned long flags;
2283
2284         /*
2285          * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2286          * drain_stock races is that we always operate on local CPU stock
2287          * here with IRQ disabled
2288          */
2289         local_lock_irqsave(&memcg_stock.stock_lock, flags);
2290
2291         stock = this_cpu_ptr(&memcg_stock);
2292         old = drain_obj_stock(stock);
2293         drain_stock(stock);
2294         clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2295
2296         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2297         if (old)
2298                 obj_cgroup_put(old);
2299 }
2300
2301 /*
2302  * Cache charges (nr_pages) in the local per-cpu area.
2303  * They will be consumed by consume_stock() later.
2304  */
2305 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2306 {
2307         struct memcg_stock_pcp *stock;
2308
2309         stock = this_cpu_ptr(&memcg_stock);
2310         if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */
2311                 drain_stock(stock);
2312                 css_get(&memcg->css);
2313                 WRITE_ONCE(stock->cached, memcg);
2314         }
2315         stock->nr_pages += nr_pages;
2316
2317         if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2318                 drain_stock(stock);
2319 }
2320
2321 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2322 {
2323         unsigned long flags;
2324
2325         local_lock_irqsave(&memcg_stock.stock_lock, flags);
2326         __refill_stock(memcg, nr_pages);
2327         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2328 }
2329
2330 /*
2331  * Drains all per-CPU charge caches for given root_memcg resp. subtree
2332  * of the hierarchy under it.
2333  */
2334 static void drain_all_stock(struct mem_cgroup *root_memcg)
2335 {
2336         int cpu, curcpu;
2337
2338         /* If someone's already draining, avoid running more workers. */
2339         if (!mutex_trylock(&percpu_charge_mutex))
2340                 return;
2341         /*
2342          * Notify other cpus that system-wide "drain" is running.
2343          * We do not care about races with the cpu hotplug because cpu down
2344          * as well as workers from this path always operate on the local
2345          * per-cpu data. CPU up doesn't touch memcg_stock at all.
2346          */
2347         migrate_disable();
2348         curcpu = smp_processor_id();
2349         for_each_online_cpu(cpu) {
2350                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2351                 struct mem_cgroup *memcg;
2352                 bool flush = false;
2353
2354                 rcu_read_lock();
2355                 memcg = READ_ONCE(stock->cached);
2356                 if (memcg && stock->nr_pages &&
2357                     mem_cgroup_is_descendant(memcg, root_memcg))
2358                         flush = true;
2359                 else if (obj_stock_flush_required(stock, root_memcg))
2360                         flush = true;
2361                 rcu_read_unlock();
2362
2363                 if (flush &&
2364                     !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2365                         if (cpu == curcpu)
2366                                 drain_local_stock(&stock->work);
2367                         else if (!cpu_is_isolated(cpu))
2368                                 schedule_work_on(cpu, &stock->work);
2369                 }
2370         }
2371         migrate_enable();
2372         mutex_unlock(&percpu_charge_mutex);
2373 }
2374
2375 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2376 {
2377         struct memcg_stock_pcp *stock;
2378
2379         stock = &per_cpu(memcg_stock, cpu);
2380         drain_stock(stock);
2381
2382         return 0;
2383 }
2384
2385 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2386                                   unsigned int nr_pages,
2387                                   gfp_t gfp_mask)
2388 {
2389         unsigned long nr_reclaimed = 0;
2390
2391         do {
2392                 unsigned long pflags;
2393
2394                 if (page_counter_read(&memcg->memory) <=
2395                     READ_ONCE(memcg->memory.high))
2396                         continue;
2397
2398                 memcg_memory_event(memcg, MEMCG_HIGH);
2399
2400                 psi_memstall_enter(&pflags);
2401                 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2402                                                         gfp_mask,
2403                                                         MEMCG_RECLAIM_MAY_SWAP);
2404                 psi_memstall_leave(&pflags);
2405         } while ((memcg = parent_mem_cgroup(memcg)) &&
2406                  !mem_cgroup_is_root(memcg));
2407
2408         return nr_reclaimed;
2409 }
2410
2411 static void high_work_func(struct work_struct *work)
2412 {
2413         struct mem_cgroup *memcg;
2414
2415         memcg = container_of(work, struct mem_cgroup, high_work);
2416         reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2417 }
2418
2419 /*
2420  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2421  * long enough to cause a significant slowdown in most cases, while still
2422  * allowing diagnostics and tracing to proceed without becoming stuck.
2423  */
2424 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2425
2426 /*
2427  * When calculating the delay, we use these on either side of the exponentiation
2428  * to maintain precision and to scale to a reasonable number of jiffies (see the
2429  * table below).
2430  *
2431  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2432  *   overage ratio to a delay.
2433  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2434  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2435  *   to produce a reasonable delay curve.
2436  *
2437  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2438  * reasonable delay curve compared to precision-adjusted overage, not
2439  * penalising heavily at first, but still making sure that growth beyond the
2440  * limit penalises misbehaving cgroups by slowing them down exponentially. For
2441  * example, with a high of 100 megabytes:
2442  *
2443  *  +-------+------------------------+
2444  *  | usage | time to allocate in ms |
2445  *  +-------+------------------------+
2446  *  | 100M  |                      0 |
2447  *  | 101M  |                      6 |
2448  *  | 102M  |                     25 |
2449  *  | 103M  |                     57 |
2450  *  | 104M  |                    102 |
2451  *  | 105M  |                    159 |
2452  *  | 106M  |                    230 |
2453  *  | 107M  |                    313 |
2454  *  | 108M  |                    409 |
2455  *  | 109M  |                    518 |
2456  *  | 110M  |                    639 |
2457  *  | 111M  |                    774 |
2458  *  | 112M  |                    921 |
2459  *  | 113M  |                   1081 |
2460  *  | 114M  |                   1254 |
2461  *  | 115M  |                   1439 |
2462  *  | 116M  |                   1638 |
2463  *  | 117M  |                   1849 |
2464  *  | 118M  |                   2000 |
2465  *  | 119M  |                   2000 |
2466  *  | 120M  |                   2000 |
2467  *  +-------+------------------------+
2468  */
2469 #define MEMCG_DELAY_PRECISION_SHIFT 20
2470 #define MEMCG_DELAY_SCALING_SHIFT 14
2471
2472 static u64 calculate_overage(unsigned long usage, unsigned long high)
2473 {
2474         u64 overage;
2475
2476         if (usage <= high)
2477                 return 0;
2478
2479         /*
2480          * Prevent division by 0 in overage calculation by acting as if
2481          * it was a threshold of 1 page
2482          */
2483         high = max(high, 1UL);
2484
2485         overage = usage - high;
2486         overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2487         return div64_u64(overage, high);
2488 }
2489
2490 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2491 {
2492         u64 overage, max_overage = 0;
2493
2494         do {
2495                 overage = calculate_overage(page_counter_read(&memcg->memory),
2496                                             READ_ONCE(memcg->memory.high));
2497                 max_overage = max(overage, max_overage);
2498         } while ((memcg = parent_mem_cgroup(memcg)) &&
2499                  !mem_cgroup_is_root(memcg));
2500
2501         return max_overage;
2502 }
2503
2504 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2505 {
2506         u64 overage, max_overage = 0;
2507
2508         do {
2509                 overage = calculate_overage(page_counter_read(&memcg->swap),
2510                                             READ_ONCE(memcg->swap.high));
2511                 if (overage)
2512                         memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2513                 max_overage = max(overage, max_overage);
2514         } while ((memcg = parent_mem_cgroup(memcg)) &&
2515                  !mem_cgroup_is_root(memcg));
2516
2517         return max_overage;
2518 }
2519
2520 /*
2521  * Get the number of jiffies by which we should penalise a mischievous cgroup
2522  * that is exceeding its memory.high, checking both it and its ancestors.
2523  */
2524 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2525                                           unsigned int nr_pages,
2526                                           u64 max_overage)
2527 {
2528         unsigned long penalty_jiffies;
2529
2530         if (!max_overage)
2531                 return 0;
2532
2533         /*
2534          * We use overage compared to memory.high to calculate the number of
2535          * jiffies to sleep (penalty_jiffies). Ideally this value should be
2536          * fairly lenient on small overages, and increasingly harsh when the
2537          * memcg in question makes it clear that it has no intention of stopping
2538          * its crazy behaviour, so we exponentially increase the delay based on
2539          * overage amount.
2540          */
2541         penalty_jiffies = max_overage * max_overage * HZ;
2542         penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2543         penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2544
2545         /*
2546          * Factor in the task's own contribution to the overage, such that four
2547          * N-sized allocations are throttled approximately the same as one
2548          * 4N-sized allocation.
2549          *
2550          * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2551          * larger the current charge batch is than that.
2552          */
2553         return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2554 }
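
/*
 * Worked example (not in the original source), assuming HZ == 1000, 4 KiB
 * pages, memory.high == 100 MiB (25600 pages) and usage == 101 MiB (25856
 * pages):
 *
 *	overage         = ((25856 - 25600) << 20) / 25600     = 10485
 *	penalty_jiffies = (10485 * 10485 * 1000) >> 20 >> 14  = 6
 *
 * which, scaled by nr_pages / MEMCG_CHARGE_BATCH, matches the ~6 ms shown
 * for 101M in the table above.
 */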
2555
2556 /*
2557  * Scheduled by try_charge() to be executed from the userland return path
2558  * and reclaims memory over the high limit.
2559  */
2560 void mem_cgroup_handle_over_high(gfp_t gfp_mask)
2561 {
2562         unsigned long penalty_jiffies;
2563         unsigned long pflags;
2564         unsigned long nr_reclaimed;
2565         unsigned int nr_pages = current->memcg_nr_pages_over_high;
2566         int nr_retries = MAX_RECLAIM_RETRIES;
2567         struct mem_cgroup *memcg;
2568         bool in_retry = false;
2569
2570         if (likely(!nr_pages))
2571                 return;
2572
2573         memcg = get_mem_cgroup_from_mm(current->mm);
2574         current->memcg_nr_pages_over_high = 0;
2575
2576 retry_reclaim:
2577         /*
2578          * The allocating task should reclaim at least the batch size, but for
2579          * subsequent retries we only want to do what's necessary to prevent oom
2580          * or breaching resource isolation.
2581          *
2582          * This is distinct from memory.max or page allocator behaviour because
2583          * memory.high is currently batched, whereas memory.max and the page
2584          * allocator run every time an allocation is made.
2585          */
2586         nr_reclaimed = reclaim_high(memcg,
2587                                     in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2588                                     gfp_mask);
2589
2590         /*
2591          * memory.high is breached and reclaim is unable to keep up. Throttle
2592          * allocators proactively to slow down excessive growth.
2593          */
2594         penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2595                                                mem_find_max_overage(memcg));
2596
2597         penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2598                                                 swap_find_max_overage(memcg));
2599
2600         /*
2601          * Clamp the max delay per usermode return so as to still keep the
2602          * application moving forwards and also permit diagnostics, albeit
2603          * extremely slowly.
2604          */
2605         penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2606
2607         /*
2608          * Don't sleep if the amount of jiffies this memcg owes us is so low
2609          * that it's not even worth doing, in an attempt to be nice to those who
2610          * go only a small amount over their memory.high value and maybe haven't
2611          * been aggressively reclaimed enough yet.
2612          */
2613         if (penalty_jiffies <= HZ / 100)
2614                 goto out;
2615
2616         /*
2617          * If reclaim is making forward progress but we're still over
2618          * memory.high, we want to encourage that rather than doing allocator
2619          * throttling.
2620          */
2621         if (nr_reclaimed || nr_retries--) {
2622                 in_retry = true;
2623                 goto retry_reclaim;
2624         }
2625
2626         /*
2627          * If we exit early, we're guaranteed to die (since
2628          * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2629          * need to account for any ill-begotten jiffies to pay them off later.
2630          */
2631         psi_memstall_enter(&pflags);
2632         schedule_timeout_killable(penalty_jiffies);
2633         psi_memstall_leave(&pflags);
2634
2635 out:
2636         css_put(&memcg->css);
2637 }
2638
2639 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2640                         unsigned int nr_pages)
2641 {
2642         unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2643         int nr_retries = MAX_RECLAIM_RETRIES;
2644         struct mem_cgroup *mem_over_limit;
2645         struct page_counter *counter;
2646         unsigned long nr_reclaimed;
2647         bool passed_oom = false;
2648         unsigned int reclaim_options = MEMCG_RECLAIM_MAY_SWAP;
2649         bool drained = false;
2650         bool raised_max_event = false;
2651         unsigned long pflags;
2652
2653 retry:
2654         if (consume_stock(memcg, nr_pages))
2655                 return 0;
2656
2657         if (!do_memsw_account() ||
2658             page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2659                 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2660                         goto done_restock;
2661                 if (do_memsw_account())
2662                         page_counter_uncharge(&memcg->memsw, batch);
2663                 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2664         } else {
2665                 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2666                 reclaim_options &= ~MEMCG_RECLAIM_MAY_SWAP;
2667         }
2668
2669         if (batch > nr_pages) {
2670                 batch = nr_pages;
2671                 goto retry;
2672         }
2673
2674         /*
2675          * Prevent unbounded recursion when reclaim operations need to
2676          * allocate memory. This might exceed the limits temporarily,
2677          * but we prefer facilitating memory reclaim and getting back
2678          * under the limit over triggering OOM kills in these cases.
2679          */
2680         if (unlikely(current->flags & PF_MEMALLOC))
2681                 goto force;
2682
2683         if (unlikely(task_in_memcg_oom(current)))
2684                 goto nomem;
2685
2686         if (!gfpflags_allow_blocking(gfp_mask))
2687                 goto nomem;
2688
2689         memcg_memory_event(mem_over_limit, MEMCG_MAX);
2690         raised_max_event = true;
2691
2692         psi_memstall_enter(&pflags);
2693         nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2694                                                     gfp_mask, reclaim_options);
2695         psi_memstall_leave(&pflags);
2696
2697         if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2698                 goto retry;
2699
2700         if (!drained) {
2701                 drain_all_stock(mem_over_limit);
2702                 drained = true;
2703                 goto retry;
2704         }
2705
2706         if (gfp_mask & __GFP_NORETRY)
2707                 goto nomem;
2708         /*
2709          * Even though the limit is exceeded at this point, reclaim
2710          * may have been able to free some pages.  Retry the charge
2711          * before killing the task.
2712          *
2713          * Only for regular pages, though: huge pages are rather
2714          * unlikely to succeed so close to the limit, and we fall back
2715          * to regular pages anyway in case of failure.
2716          */
2717         if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2718                 goto retry;
2719         /*
2720          * During task move, charges can be doubly counted, so it's better
2721          * to wait until the end of the move if one is in progress.
2722          */
2723         if (mem_cgroup_wait_acct_move(mem_over_limit))
2724                 goto retry;
2725
2726         if (nr_retries--)
2727                 goto retry;
2728
2729         if (gfp_mask & __GFP_RETRY_MAYFAIL)
2730                 goto nomem;
2731
2732         /* Avoid endless loop for tasks bypassed by the oom killer */
2733         if (passed_oom && task_is_dying())
2734                 goto nomem;
2735
2736         /*
2737          * Keep retrying as long as the memcg OOM killer is able to make
2738          * forward progress, or bypass the charge if the OOM killer
2739          * couldn't make any progress.
2740          */
2741         if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2742                            get_order(nr_pages * PAGE_SIZE))) {
2743                 passed_oom = true;
2744                 nr_retries = MAX_RECLAIM_RETRIES;
2745                 goto retry;
2746         }
2747 nomem:
2748         /*
2749          * Memcg doesn't have a dedicated reserve for atomic
2750          * allocations. But like the global atomic pool, we need to
2751          * put the burden of reclaim on regular allocation requests
2752          * and let these go through as privileged allocations.
2753          */
2754         if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2755                 return -ENOMEM;
2756 force:
2757         /*
2758          * If the allocation has to be enforced, don't forget to raise
2759          * a MEMCG_MAX event.
2760          */
2761         if (!raised_max_event)
2762                 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2763
2764         /*
2765          * The allocation either can't fail or will lead to more memory
2766          * being freed very soon.  Allow memory usage to go over the limit
2767          * temporarily by force-charging it.
2768          */
2769         page_counter_charge(&memcg->memory, nr_pages);
2770         if (do_memsw_account())
2771                 page_counter_charge(&memcg->memsw, nr_pages);
2772
2773         return 0;
2774
2775 done_restock:
2776         if (batch > nr_pages)
2777                 refill_stock(memcg, batch - nr_pages);
2778
2779         /*
2780          * If the hierarchy is above the normal consumption range, schedule
2781          * reclaim on returning to userland.  We can perform reclaim here
2782          * if __GFP_RECLAIM but let's always punt for simplicity and so that
2783          * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2784          * not recorded as it most likely matches current's and won't
2785          * change in the meantime.  As high limit is checked again before
2786          * reclaim, the cost of mismatch is negligible.
2787          */
2788         do {
2789                 bool mem_high, swap_high;
2790
2791                 mem_high = page_counter_read(&memcg->memory) >
2792                         READ_ONCE(memcg->memory.high);
2793                 swap_high = page_counter_read(&memcg->swap) >
2794                         READ_ONCE(memcg->swap.high);
2795
2796                 /* Don't bother a random interrupted task */
2797                 if (!in_task()) {
2798                         if (mem_high) {
2799                                 schedule_work(&memcg->high_work);
2800                                 break;
2801                         }
2802                         continue;
2803                 }
2804
2805                 if (mem_high || swap_high) {
2806                         /*
2807                          * The allocating tasks in this cgroup will need to do
2808                          * reclaim or be throttled to prevent further growth
2809                          * of the memory or swap footprints.
2810                          *
2811                          * Target some best-effort fairness between the tasks,
2812                          * and distribute reclaim work and delay penalties
2813                          * based on how much each task is actually allocating.
2814                          */
2815                         current->memcg_nr_pages_over_high += batch;
2816                         set_notify_resume(current);
2817                         break;
2818                 }
2819         } while ((memcg = parent_mem_cgroup(memcg)));
2820
2821         if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2822             !(current->flags & PF_MEMALLOC) &&
2823             gfpflags_allow_blocking(gfp_mask)) {
2824                 mem_cgroup_handle_over_high(gfp_mask);
2825         }
2826         return 0;
2827 }
2828
2829 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2830                              unsigned int nr_pages)
2831 {
2832         if (mem_cgroup_is_root(memcg))
2833                 return 0;
2834
2835         return try_charge_memcg(memcg, gfp_mask, nr_pages);
2836 }
2837
2838 static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2839 {
2840         if (mem_cgroup_is_root(memcg))
2841                 return;
2842
2843         page_counter_uncharge(&memcg->memory, nr_pages);
2844         if (do_memsw_account())
2845                 page_counter_uncharge(&memcg->memsw, nr_pages);
2846 }
2847
2848 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2849 {
2850         VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2851         /*
2852          * Any of the following ensures page's memcg stability:
2853          *
2854          * - the page lock
2855          * - LRU isolation
2856          * - folio_memcg_lock()
2857          * - exclusive reference
2858          * - mem_cgroup_trylock_pages()
2859          */
2860         folio->memcg_data = (unsigned long)memcg;
2861 }
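
/*
 * Minimal sketch (not in the original source) of how the helpers above are
 * meant to be paired by a caller; example_charge_folio() is a hypothetical,
 * condensed version of what the real folio charge path does.
 */
static inline int example_charge_folio(struct folio *folio,
                                       struct mem_cgroup *memcg, gfp_t gfp)
{
        long nr_pages = folio_nr_pages(folio);
        int ret;

        ret = try_charge(memcg, gfp, nr_pages); /* reserve pages in the counters */
        if (ret)
                return ret;

        css_get(&memcg->css);           /* the folio now pins the memcg */
        commit_charge(folio, memcg);    /* bind the folio to @memcg */
        return 0;
}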
2862
2863 #ifdef CONFIG_MEMCG_KMEM
2864 /*
2865  * The allocated objcg pointers array is not accounted directly.
2866  * Moreover, it should not come from a DMA buffer and is not readily
2867  * reclaimable. So those GFP bits should be masked off.
2868  */
2869 #define OBJCGS_CLEAR_MASK       (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
2870
2871 /*
2872  * mod_objcg_mlstate() may be called with irq enabled, so
2873  * mod_memcg_lruvec_state() should be used.
2874  */
2875 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2876                                      struct pglist_data *pgdat,
2877                                      enum node_stat_item idx, int nr)
2878 {
2879         struct mem_cgroup *memcg;
2880         struct lruvec *lruvec;
2881
2882         rcu_read_lock();
2883         memcg = obj_cgroup_memcg(objcg);
2884         lruvec = mem_cgroup_lruvec(memcg, pgdat);
2885         mod_memcg_lruvec_state(lruvec, idx, nr);
2886         rcu_read_unlock();
2887 }
2888
2889 int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
2890                                  gfp_t gfp, bool new_slab)
2891 {
2892         unsigned int objects = objs_per_slab(s, slab);
2893         unsigned long memcg_data;
2894         void *vec;
2895
2896         gfp &= ~OBJCGS_CLEAR_MASK;
2897         vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2898                            slab_nid(slab));
2899         if (!vec)
2900                 return -ENOMEM;
2901
2902         memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
2903         if (new_slab) {
2904                 /*
2905                  * If the slab is brand new and nobody can yet access its
2906                  * memcg_data, no synchronization is required and memcg_data can
2907                  * be simply assigned.
2908                  */
2909                 slab->memcg_data = memcg_data;
2910         } else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
2911                 /*
2912                  * If the slab is already in use, somebody can allocate and
2913                  * assign obj_cgroups in parallel. In this case the existing
2914                  * objcg vector should be reused.
2915                  */
2916                 kfree(vec);
2917                 return 0;
2918         }
2919
2920         kmemleak_not_leak(vec);
2921         return 0;
2922 }
2923
2924 static __always_inline
2925 struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p)
2926 {
2927         /*
2928          * Slab objects are accounted individually, not per-page.
2929          * Memcg membership data for each individual object is saved in
2930          * slab->memcg_data.
2931          */
2932         if (folio_test_slab(folio)) {
2933                 struct obj_cgroup **objcgs;
2934                 struct slab *slab;
2935                 unsigned int off;
2936
2937                 slab = folio_slab(folio);
2938                 objcgs = slab_objcgs(slab);
2939                 if (!objcgs)
2940                         return NULL;
2941
2942                 off = obj_to_index(slab->slab_cache, slab, p);
2943                 if (objcgs[off])
2944                         return obj_cgroup_memcg(objcgs[off]);
2945
2946                 return NULL;
2947         }
2948
2949         /*
2950          * folio_memcg_check() is used here, because in theory we can encounter
2951          * a folio where the slab flag has been cleared already, but
2952          * slab->memcg_data has not been freed yet.
2953          * folio_memcg_check() will guarantee that a proper memory
2954          * cgroup pointer or NULL will be returned.
2955          */
2956         return folio_memcg_check(folio);
2957 }
2958
2959 /*
2960  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2961  *
2962  * A passed kernel object can be a slab object, vmalloc object or a generic
2963  * kernel page, so different mechanisms for getting the memory cgroup pointer
2964  * should be used.
2965  *
2966  * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
2967  * cannot know for sure how the kernel object is implemented.
2968  * mem_cgroup_from_obj() can be safely used in such cases.
2969  *
2970  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2971  * cgroup_mutex, etc.
2972  */
2973 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2974 {
2975         struct folio *folio;
2976
2977         if (mem_cgroup_disabled())
2978                 return NULL;
2979
2980         if (unlikely(is_vmalloc_addr(p)))
2981                 folio = page_folio(vmalloc_to_page(p));
2982         else
2983                 folio = virt_to_folio(p);
2984
2985         return mem_cgroup_from_obj_folio(folio, p);
2986 }
2987
2988 /*
2989  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2990  * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects,
2991  * Similar to mem_cgroup_from_obj(), but faster and not suitable for objects
2992  * allocated using vmalloc().
2993  * A passed kernel object must be a slab object or a generic kernel page.
2994  *
2995  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2996  * cgroup_mutex, etc.
2997  */
2998 struct mem_cgroup *mem_cgroup_from_slab_obj(void *p)
2999 {
3000         if (mem_cgroup_disabled())
3001                 return NULL;
3002
3003         return mem_cgroup_from_obj_folio(virt_to_folio(p), p);
3004 }
3005
3006 static struct obj_cgroup *__get_obj_cgroup_from_memcg(struct mem_cgroup *memcg)
3007 {
3008         struct obj_cgroup *objcg = NULL;
3009
3010         for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
3011                 objcg = rcu_dereference(memcg->objcg);
3012                 if (objcg && obj_cgroup_tryget(objcg))
3013                         break;
3014                 objcg = NULL;
3015         }
3016         return objcg;
3017 }
3018
3019 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
3020 {
3021         struct obj_cgroup *objcg = NULL;
3022         struct mem_cgroup *memcg;
3023
3024         if (memcg_kmem_bypass())
3025                 return NULL;
3026
3027         rcu_read_lock();
3028         if (unlikely(active_memcg()))
3029                 memcg = active_memcg();
3030         else
3031                 memcg = mem_cgroup_from_task(current);
3032         objcg = __get_obj_cgroup_from_memcg(memcg);
3033         rcu_read_unlock();
3034         return objcg;
3035 }
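
/*
 * Illustrative sketch (not compiled in) of how a kmem allocation path is
 * expected to use the helper above; error handling is abbreviated and
 * "gfp"/"size" are placeholders for the caller's values:
 *
 *	objcg = get_obj_cgroup_from_current();
 *	if (objcg) {
 *		if (obj_cgroup_charge(objcg, gfp, size)) {
 *			obj_cgroup_put(objcg);
 *			return NULL;
 *		}
 *		...
 *	}
 */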
3036
3037 struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
3038 {
3039         struct obj_cgroup *objcg;
3040
3041         if (!memcg_kmem_online())
3042                 return NULL;
3043
3044         if (folio_memcg_kmem(folio)) {
3045                 objcg = __folio_objcg(folio);
3046                 obj_cgroup_get(objcg);
3047         } else {
3048                 struct mem_cgroup *memcg;
3049
3050                 rcu_read_lock();
3051                 memcg = __folio_memcg(folio);
3052                 if (memcg)
3053                         objcg = __get_obj_cgroup_from_memcg(memcg);
3054                 else
3055                         objcg = NULL;
3056                 rcu_read_unlock();
3057         }
3058         return objcg;
3059 }
3060
3061 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
3062 {
3063         mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
3064         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
3065                 if (nr_pages > 0)
3066                         page_counter_charge(&memcg->kmem, nr_pages);
3067                 else
3068                         page_counter_uncharge(&memcg->kmem, -nr_pages);
3069         }
3070 }
3071
3073 /*
3074  * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
3075  * @objcg: object cgroup to uncharge
3076  * @nr_pages: number of pages to uncharge
3077  */
3078 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
3079                                       unsigned int nr_pages)
3080 {
3081         struct mem_cgroup *memcg;
3082
3083         memcg = get_mem_cgroup_from_objcg(objcg);
3084
3085         memcg_account_kmem(memcg, -nr_pages);
3086         refill_stock(memcg, nr_pages);
3087
3088         css_put(&memcg->css);
3089 }
3090
3091 /*
3092  * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
3093  * @objcg: object cgroup to charge
3094  * @gfp: reclaim mode
3095  * @nr_pages: number of pages to charge
3096  *
3097  * Returns 0 on success, an error code on failure.
3098  */
3099 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3100                                    unsigned int nr_pages)
3101 {
3102         struct mem_cgroup *memcg;
3103         int ret;
3104
3105         memcg = get_mem_cgroup_from_objcg(objcg);
3106
3107         ret = try_charge_memcg(memcg, gfp, nr_pages);
3108         if (ret)
3109                 goto out;
3110
3111         memcg_account_kmem(memcg, nr_pages);
3112 out:
3113         css_put(&memcg->css);
3114
3115         return ret;
3116 }
3117
3118 /**
3119  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3120  * @page: page to charge
3121  * @gfp: reclaim mode
3122  * @order: allocation order
3123  *
3124  * Returns 0 on success, an error code on failure.
3125  */
3126 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3127 {
3128         struct obj_cgroup *objcg;
3129         int ret = 0;
3130
3131         objcg = get_obj_cgroup_from_current();
3132         if (objcg) {
3133                 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3134                 if (!ret) {
3135                         page->memcg_data = (unsigned long)objcg |
3136                                 MEMCG_DATA_KMEM;
3137                         return 0;
3138                 }
3139                 obj_cgroup_put(objcg);
3140         }
3141         return ret;
3142 }
3143
3144 /**
3145  * __memcg_kmem_uncharge_page: uncharge a kmem page
3146  * @page: page to uncharge
3147  * @order: allocation order
3148  */
3149 void __memcg_kmem_uncharge_page(struct page *page, int order)
3150 {
3151         struct folio *folio = page_folio(page);
3152         struct obj_cgroup *objcg;
3153         unsigned int nr_pages = 1 << order;
3154
3155         if (!folio_memcg_kmem(folio))
3156                 return;
3157
3158         objcg = __folio_objcg(folio);
3159         obj_cgroup_uncharge_pages(objcg, nr_pages);
3160         folio->memcg_data = 0;
3161         obj_cgroup_put(objcg);
3162 }
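
/*
 * Illustrative sketch (not compiled in): the two helpers above back
 * accounted page allocations such as
 *
 *	page = alloc_pages(GFP_KERNEL | __GFP_ACCOUNT, order);
 *	...
 *	__free_pages(page, order);
 *
 * where the charge is attributed to the allocating task's objcg on
 * allocation and dropped again when the page is freed.
 */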
3163
3164 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3165                      enum node_stat_item idx, int nr)
3166 {
3167         struct memcg_stock_pcp *stock;
3168         struct obj_cgroup *old = NULL;
3169         unsigned long flags;
3170         int *bytes;
3171
3172         local_lock_irqsave(&memcg_stock.stock_lock, flags);
3173         stock = this_cpu_ptr(&memcg_stock);
3174
3175         /*
3176          * Save vmstat data in stock and skip vmstat array update unless
3177          * accumulating over a page of vmstat data or when pgdat or idx
3178          * changes.
3179          */
3180         if (READ_ONCE(stock->cached_objcg) != objcg) {
3181                 old = drain_obj_stock(stock);
3182                 obj_cgroup_get(objcg);
3183                 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3184                                 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3185                 WRITE_ONCE(stock->cached_objcg, objcg);
3186                 stock->cached_pgdat = pgdat;
3187         } else if (stock->cached_pgdat != pgdat) {
3188                 /* Flush the existing cached vmstat data */
3189                 struct pglist_data *oldpg = stock->cached_pgdat;
3190
3191                 if (stock->nr_slab_reclaimable_b) {
3192                         mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3193                                           stock->nr_slab_reclaimable_b);
3194                         stock->nr_slab_reclaimable_b = 0;
3195                 }
3196                 if (stock->nr_slab_unreclaimable_b) {
3197                         mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3198                                           stock->nr_slab_unreclaimable_b);
3199                         stock->nr_slab_unreclaimable_b = 0;
3200                 }
3201                 stock->cached_pgdat = pgdat;
3202         }
3203
3204         bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3205                                                : &stock->nr_slab_unreclaimable_b;
3206         /*
3207          * Even for a large object (>= PAGE_SIZE), the vmstat data will still be
3208          * cached locally at least once before being pushed out.
3209          */
3210         if (!*bytes) {
3211                 *bytes = nr;
3212                 nr = 0;
3213         } else {
3214                 *bytes += nr;
3215                 if (abs(*bytes) > PAGE_SIZE) {
3216                         nr = *bytes;
3217                         *bytes = 0;
3218                 } else {
3219                         nr = 0;
3220                 }
3221         }
3222         if (nr)
3223                 mod_objcg_mlstate(objcg, pgdat, idx, nr);
3224
3225         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3226         if (old)
3227                 obj_cgroup_put(old);
3228 }
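
/*
 * Worked example for the batching above (assuming PAGE_SIZE == 4096):
 * three consecutive updates of +2000, +1500 and +1000 bytes for the same
 * objcg, pgdat and idx accumulate 2000 and then 3500 bytes in the stock;
 * the third update brings the total to 4500 > PAGE_SIZE, so 4500 bytes
 * are flushed via mod_objcg_mlstate() and the cached value resets to 0.
 */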
3229
3230 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3231 {
3232         struct memcg_stock_pcp *stock;
3233         unsigned long flags;
3234         bool ret = false;
3235
3236         local_lock_irqsave(&memcg_stock.stock_lock, flags);
3237
3238         stock = this_cpu_ptr(&memcg_stock);
3239         if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) {
3240                 stock->nr_bytes -= nr_bytes;
3241                 ret = true;
3242         }
3243
3244         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3245
3246         return ret;
3247 }
3248
3249 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3250 {
3251         struct obj_cgroup *old = READ_ONCE(stock->cached_objcg);
3252
3253         if (!old)
3254                 return NULL;
3255
3256         if (stock->nr_bytes) {
3257                 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3258                 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3259
3260                 if (nr_pages) {
3261                         struct mem_cgroup *memcg;
3262
3263                         memcg = get_mem_cgroup_from_objcg(old);
3264
3265                         memcg_account_kmem(memcg, -nr_pages);
3266                         __refill_stock(memcg, nr_pages);
3267
3268                         css_put(&memcg->css);
3269                 }
3270
3271                 /*
3272                  * The leftover is flushed to the centralized per-memcg value.
3273                  * On the next attempt to refill obj stock it will be moved
3274                  * to a per-cpu stock (probably on another CPU), see
3275                  * refill_obj_stock().
3276                  *
3277                  * How often it's flushed is a trade-off between the memory
3278                  * limit enforcement accuracy and potential CPU contention,
3279                  * so it might be changed in the future.
3280                  */
3281                 atomic_add(nr_bytes, &old->nr_charged_bytes);
3282                 stock->nr_bytes = 0;
3283         }
3284
3285         /*
3286          * Flush the vmstat data in the current stock.
3287          */
3288         if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3289                 if (stock->nr_slab_reclaimable_b) {
3290                         mod_objcg_mlstate(old, stock->cached_pgdat,
3291                                           NR_SLAB_RECLAIMABLE_B,
3292                                           stock->nr_slab_reclaimable_b);
3293                         stock->nr_slab_reclaimable_b = 0;
3294                 }
3295                 if (stock->nr_slab_unreclaimable_b) {
3296                         mod_objcg_mlstate(old, stock->cached_pgdat,
3297                                           NR_SLAB_UNRECLAIMABLE_B,
3298                                           stock->nr_slab_unreclaimable_b);
3299                         stock->nr_slab_unreclaimable_b = 0;
3300                 }
3301                 stock->cached_pgdat = NULL;
3302         }
3303
3304         WRITE_ONCE(stock->cached_objcg, NULL);
3305         /*
3306          * The `old' objcg needs to be released by the caller via
3307          * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3308          */
3309         return old;
3310 }
3311
3312 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3313                                      struct mem_cgroup *root_memcg)
3314 {
3315         struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);
3316         struct mem_cgroup *memcg;
3317
3318         if (objcg) {
3319                 memcg = obj_cgroup_memcg(objcg);
3320                 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3321                         return true;
3322         }
3323
3324         return false;
3325 }
3326
3327 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3328                              bool allow_uncharge)
3329 {
3330         struct memcg_stock_pcp *stock;
3331         struct obj_cgroup *old = NULL;
3332         unsigned long flags;
3333         unsigned int nr_pages = 0;
3334
3335         local_lock_irqsave(&memcg_stock.stock_lock, flags);
3336
3337         stock = this_cpu_ptr(&memcg_stock);
3338         if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */
3339                 old = drain_obj_stock(stock);
3340                 obj_cgroup_get(objcg);
3341                 WRITE_ONCE(stock->cached_objcg, objcg);
3342                 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3343                                 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3344                 allow_uncharge = true;  /* Allow uncharge when objcg changes */
3345         }
3346         stock->nr_bytes += nr_bytes;
3347
3348         if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3349                 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3350                 stock->nr_bytes &= (PAGE_SIZE - 1);
3351         }
3352
3353         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3354         if (old)
3355                 obj_cgroup_put(old);
3356
3357         if (nr_pages)
3358                 obj_cgroup_uncharge_pages(objcg, nr_pages);
3359 }
3360
3361 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3362 {
3363         unsigned int nr_pages, nr_bytes;
3364         int ret;
3365
3366         if (consume_obj_stock(objcg, size))
3367                 return 0;
3368
3369         /*
3370          * In theory, objcg->nr_charged_bytes can have enough
3371          * pre-charged bytes to satisfy the allocation. However,
3372          * flushing objcg->nr_charged_bytes requires two atomic
3373          * operations, and objcg->nr_charged_bytes can't be big.
3374          * The shared objcg->nr_charged_bytes can also become a
3375          * performance bottleneck if all tasks of the same memcg are
3376          * trying to update it. So it's better to ignore it and try to
3377          * grab some new pages. The stock's nr_bytes will be flushed to
3378          * objcg->nr_charged_bytes later on when objcg changes.
3379          *
3380          * The stock's nr_bytes may contain enough pre-charged bytes
3381          * to allow one less page to be charged, but we can't rely
3382          * on the pre-charged bytes not being changed outside of
3383          * consume_obj_stock() or refill_obj_stock(). So ignore those
3384          * pre-charged bytes as well when charging pages. To avoid a
3385          * page uncharge right after a page charge, we set the
3386          * allow_uncharge flag to false when calling refill_obj_stock()
3387          * to temporarily allow the pre-charged bytes to exceed the page
3388          * size limit. The maximum reachable value of the pre-charged
3389          * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3390          * race.
3391          */
3392         nr_pages = size >> PAGE_SHIFT;
3393         nr_bytes = size & (PAGE_SIZE - 1);
3394
3395         if (nr_bytes)
3396                 nr_pages += 1;
3397
3398         ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3399         if (!ret && nr_bytes)
3400                 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3401
3402         return ret;
3403 }
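
/*
 * Worked example (assuming PAGE_SIZE == 4096): charging a 700 byte object
 * that misses the per-cpu stock gives nr_pages = 0 and nr_bytes = 700, so
 * one full page is charged and the remaining 4096 - 700 = 3396 bytes are
 * returned to the stock with allow_uncharge == false. A later
 * obj_cgroup_uncharge() of the same object simply adds 700 bytes back to
 * the stock, from where they may eventually be uncharged page-wise.
 */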
3404
3405 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3406 {
3407         refill_obj_stock(objcg, size, true);
3408 }
3409
3410 #endif /* CONFIG_MEMCG_KMEM */
3411
3412 /*
3413  * Because page_memcg(head) is not set on tails, set it now.
3414  */
3415 void split_page_memcg(struct page *head, unsigned int nr)
3416 {
3417         struct folio *folio = page_folio(head);
3418         struct mem_cgroup *memcg = folio_memcg(folio);
3419         int i;
3420
3421         if (mem_cgroup_disabled() || !memcg)
3422                 return;
3423
3424         for (i = 1; i < nr; i++)
3425                 folio_page(folio, i)->memcg_data = folio->memcg_data;
3426
3427         if (folio_memcg_kmem(folio))
3428                 obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
3429         else
3430                 css_get_many(&memcg->css, nr - 1);
3431 }
3432
3433 #ifdef CONFIG_SWAP
3434 /**
3435  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3436  * @entry: swap entry to be moved
3437  * @from:  mem_cgroup which the entry is moved from
3438  * @to:  mem_cgroup which the entry is moved to
3439  *
3440  * It succeeds only when the swap_cgroup's record for this entry is the same
3441  * as the mem_cgroup's id of @from.
3442  *
3443  * Returns 0 on success, -EINVAL on failure.
3444  *
3445  * The caller must have charged to @to, IOW, called page_counter_charge() on
3446  * both the memory and memsw counters, and called css_get().
3447  */
3448 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3449                                 struct mem_cgroup *from, struct mem_cgroup *to)
3450 {
3451         unsigned short old_id, new_id;
3452
3453         old_id = mem_cgroup_id(from);
3454         new_id = mem_cgroup_id(to);
3455
3456         if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3457                 mod_memcg_state(from, MEMCG_SWAP, -1);
3458                 mod_memcg_state(to, MEMCG_SWAP, 1);
3459                 return 0;
3460         }
3461         return -EINVAL;
3462 }
3463 #else
3464 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3465                                 struct mem_cgroup *from, struct mem_cgroup *to)
3466 {
3467         return -EINVAL;
3468 }
3469 #endif
3470
3471 static DEFINE_MUTEX(memcg_max_mutex);
3472
3473 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3474                                  unsigned long max, bool memsw)
3475 {
3476         bool enlarge = false;
3477         bool drained = false;
3478         int ret;
3479         bool limits_invariant;
3480         struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3481
3482         do {
3483                 if (signal_pending(current)) {
3484                         ret = -EINTR;
3485                         break;
3486                 }
3487
3488                 mutex_lock(&memcg_max_mutex);
3489                 /*
3490                  * Make sure that the new limit (memsw or memory limit) doesn't
3491                  * break our basic invariant rule memory.max <= memsw.max.
3492                  */
3493                 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3494                                            max <= memcg->memsw.max;
3495                 if (!limits_invariant) {
3496                         mutex_unlock(&memcg_max_mutex);
3497                         ret = -EINVAL;
3498                         break;
3499                 }
3500                 if (max > counter->max)
3501                         enlarge = true;
3502                 ret = page_counter_set_max(counter, max);
3503                 mutex_unlock(&memcg_max_mutex);
3504
3505                 if (!ret)
3506                         break;
3507
3508                 if (!drained) {
3509                         drain_all_stock(memcg);
3510                         drained = true;
3511                         continue;
3512                 }
3513
3514                 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3515                                         memsw ? 0 : MEMCG_RECLAIM_MAY_SWAP)) {
3516                         ret = -EBUSY;
3517                         break;
3518                 }
3519         } while (true);
3520
3521         if (!ret && enlarge)
3522                 memcg_oom_recover(memcg);
3523
3524         return ret;
3525 }
3526
3527 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3528                                             gfp_t gfp_mask,
3529                                             unsigned long *total_scanned)
3530 {
3531         unsigned long nr_reclaimed = 0;
3532         struct mem_cgroup_per_node *mz, *next_mz = NULL;
3533         unsigned long reclaimed;
3534         int loop = 0;
3535         struct mem_cgroup_tree_per_node *mctz;
3536         unsigned long excess;
3537
3538         if (lru_gen_enabled())
3539                 return 0;
3540
3541         if (order > 0)
3542                 return 0;
3543
3544         mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3545
3546         /*
3547          * Do not even bother to check the largest node if the root
3548          * is empty. Do it lockless to prevent lock bouncing. Races
3549          * are acceptable as soft limit is best effort anyway.
3550          */
3551         if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3552                 return 0;
3553
3554         /*
3555          * This loop can run for a while, especially if mem_cgroups continuously
3556          * keep exceeding their soft limit and putting the system under
3557          * pressure.
3558          */
3559         do {
3560                 if (next_mz)
3561                         mz = next_mz;
3562                 else
3563                         mz = mem_cgroup_largest_soft_limit_node(mctz);
3564                 if (!mz)
3565                         break;
3566
3567                 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3568                                                     gfp_mask, total_scanned);
3569                 nr_reclaimed += reclaimed;
3570                 spin_lock_irq(&mctz->lock);
3571
3572                 /*
3573                  * If we failed to reclaim anything from this memory cgroup
3574                  * it is time to move on to the next cgroup
3575                  */
3576                 next_mz = NULL;
3577                 if (!reclaimed)
3578                         next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3579
3580                 excess = soft_limit_excess(mz->memcg);
3581                 /*
3582                  * One school of thought says that we should not add
3583                  * back the node to the tree if reclaim returns 0.
3584                  * But our reclaim could return 0 simply because, due
3585                  * to the reclaim priority, we are exposing a smaller
3586                  * subset of memory to reclaim from. Consider this as a longer
3587                  * term TODO.
3588                  */
3589                 /* If excess == 0, no tree ops */
3590                 __mem_cgroup_insert_exceeded(mz, mctz, excess);
3591                 spin_unlock_irq(&mctz->lock);
3592                 css_put(&mz->memcg->css);
3593                 loop++;
3594                 /*
3595                  * Could not reclaim anything and there are no more
3596                  * mem cgroups to try or we seem to be looping without
3597                  * reclaiming anything.
3598                  */
3599                 if (!nr_reclaimed &&
3600                         (next_mz == NULL ||
3601                         loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3602                         break;
3603         } while (!nr_reclaimed);
3604         if (next_mz)
3605                 css_put(&next_mz->memcg->css);
3606         return nr_reclaimed;
3607 }
3608
3609 /*
3610  * Reclaims as many pages from the given memcg as possible.
3611  *
3612  * Caller is responsible for holding css reference for memcg.
3613  */
3614 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3615 {
3616         int nr_retries = MAX_RECLAIM_RETRIES;
3617
3618         /* we call try-to-free pages to make this cgroup empty */
3619         lru_add_drain_all();
3620
3621         drain_all_stock(memcg);
3622
3623         /* try to free all pages in this cgroup */
3624         while (nr_retries && page_counter_read(&memcg->memory)) {
3625                 if (signal_pending(current))
3626                         return -EINTR;
3627
3628                 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL,
3629                                                   MEMCG_RECLAIM_MAY_SWAP))
3630                         nr_retries--;
3631         }
3632
3633         return 0;
3634 }
3635
3636 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3637                                             char *buf, size_t nbytes,
3638                                             loff_t off)
3639 {
3640         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3641
3642         if (mem_cgroup_is_root(memcg))
3643                 return -EINVAL;
3644         return mem_cgroup_force_empty(memcg) ?: nbytes;
3645 }
3646
3647 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3648                                      struct cftype *cft)
3649 {
3650         return 1;
3651 }
3652
3653 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3654                                       struct cftype *cft, u64 val)
3655 {
3656         if (val == 1)
3657                 return 0;
3658
3659         pr_warn_once("Non-hierarchical mode is deprecated. "
3660                      "Please report your usecase to linux-mm@kvack.org if you "
3661                      "depend on this functionality.\n");
3662
3663         return -EINVAL;
3664 }
3665
3666 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3667 {
3668         unsigned long val;
3669
3670         if (mem_cgroup_is_root(memcg)) {
3671                 /*
3672                  * Approximate root's usage from global state. This isn't
3673                  * perfect, but the root usage was always an approximation.
3674                  */
3675                 val = global_node_page_state(NR_FILE_PAGES) +
3676                         global_node_page_state(NR_ANON_MAPPED);
3677                 if (swap)
3678                         val += total_swap_pages - get_nr_swap_pages();
3679         } else {
3680                 if (!swap)
3681                         val = page_counter_read(&memcg->memory);
3682                 else
3683                         val = page_counter_read(&memcg->memsw);
3684         }
3685         return val;
3686 }
3687
3688 enum {
3689         RES_USAGE,
3690         RES_LIMIT,
3691         RES_MAX_USAGE,
3692         RES_FAILCNT,
3693         RES_SOFT_LIMIT,
3694 };
3695
3696 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3697                                struct cftype *cft)
3698 {
3699         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3700         struct page_counter *counter;
3701
3702         switch (MEMFILE_TYPE(cft->private)) {
3703         case _MEM:
3704                 counter = &memcg->memory;
3705                 break;
3706         case _MEMSWAP:
3707                 counter = &memcg->memsw;
3708                 break;
3709         case _KMEM:
3710                 counter = &memcg->kmem;
3711                 break;
3712         case _TCP:
3713                 counter = &memcg->tcpmem;
3714                 break;
3715         default:
3716                 BUG();
3717         }
3718
3719         switch (MEMFILE_ATTR(cft->private)) {
3720         case RES_USAGE:
3721                 if (counter == &memcg->memory)
3722                         return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3723                 if (counter == &memcg->memsw)
3724                         return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3725                 return (u64)page_counter_read(counter) * PAGE_SIZE;
3726         case RES_LIMIT:
3727                 return (u64)counter->max * PAGE_SIZE;
3728         case RES_MAX_USAGE:
3729                 return (u64)counter->watermark * PAGE_SIZE;
3730         case RES_FAILCNT:
3731                 return counter->failcnt;
3732         case RES_SOFT_LIMIT:
3733                 return (u64)READ_ONCE(memcg->soft_limit) * PAGE_SIZE;
3734         default:
3735                 BUG();
3736         }
3737 }
3738
3739 /*
3740  * This function doesn't do anything useful. Its only job is to provide a read
3741  * handler for a file so that cgroup_file_mode() will add read permissions.
3742  */
3743 static int mem_cgroup_dummy_seq_show(__always_unused struct seq_file *m,
3744                                      __always_unused void *v)
3745 {
3746         return -EINVAL;
3747 }
3748
3749 #ifdef CONFIG_MEMCG_KMEM
3750 static int memcg_online_kmem(struct mem_cgroup *memcg)
3751 {
3752         struct obj_cgroup *objcg;
3753
3754         if (mem_cgroup_kmem_disabled())
3755                 return 0;
3756
3757         if (unlikely(mem_cgroup_is_root(memcg)))
3758                 return 0;
3759
3760         objcg = obj_cgroup_alloc();
3761         if (!objcg)
3762                 return -ENOMEM;
3763
3764         objcg->memcg = memcg;
3765         rcu_assign_pointer(memcg->objcg, objcg);
3766
3767         static_branch_enable(&memcg_kmem_online_key);
3768
3769         memcg->kmemcg_id = memcg->id.id;
3770
3771         return 0;
3772 }
3773
3774 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3775 {
3776         struct mem_cgroup *parent;
3777
3778         if (mem_cgroup_kmem_disabled())
3779                 return;
3780
3781         if (unlikely(mem_cgroup_is_root(memcg)))
3782                 return;
3783
3784         parent = parent_mem_cgroup(memcg);
3785         if (!parent)
3786                 parent = root_mem_cgroup;
3787
3788         memcg_reparent_objcgs(memcg, parent);
3789
3790         /*
3791          * After we have finished memcg_reparent_objcgs(), all list_lrus
3792          * corresponding to this cgroup are guaranteed to remain empty.
3793          * The ordering is imposed by list_lru_node->lock taken by
3794          * memcg_reparent_list_lrus().
3795          */
3796         memcg_reparent_list_lrus(memcg, parent);
3797 }
3798 #else
3799 static int memcg_online_kmem(struct mem_cgroup *memcg)
3800 {
3801         return 0;
3802 }
3803 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3804 {
3805 }
3806 #endif /* CONFIG_MEMCG_KMEM */
3807
3808 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3809 {
3810         int ret;
3811
3812         mutex_lock(&memcg_max_mutex);
3813
3814         ret = page_counter_set_max(&memcg->tcpmem, max);
3815         if (ret)
3816                 goto out;
3817
3818         if (!memcg->tcpmem_active) {
3819                 /*
3820                  * The active flag needs to be written after the static_key
3821                  * update. This is what guarantees that the socket activation
3822                  * function is the last one to run. See mem_cgroup_sk_alloc()
3823                  * for details, and note that we don't mark any socket as
3824                  * belonging to this memcg until that flag is up.
3825                  *
3826                  * We need to do this, because static_keys will span multiple
3827                  * sites, but we can't control their order. If we mark a socket
3828                  * as accounted, but the accounting functions are not patched in
3829                  * yet, we'll lose accounting.
3830                  *
3831                  * We never race with the readers in mem_cgroup_sk_alloc(),
3832                  * because when this value changes, the code to process it is not
3833                  * patched in yet.
3834                  */
3835                 static_branch_inc(&memcg_sockets_enabled_key);
3836                 memcg->tcpmem_active = true;
3837         }
3838 out:
3839         mutex_unlock(&memcg_max_mutex);
3840         return ret;
3841 }
3842
3843 /*
3844  * Write handler for the cgroup1 limit files: RES_LIMIT for the hard
3845  * limits and RES_SOFT_LIMIT for the soft limit.
3846  */
3847 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3848                                 char *buf, size_t nbytes, loff_t off)
3849 {
3850         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3851         unsigned long nr_pages;
3852         int ret;
3853
3854         buf = strstrip(buf);
3855         ret = page_counter_memparse(buf, "-1", &nr_pages);
3856         if (ret)
3857                 return ret;
3858
3859         switch (MEMFILE_ATTR(of_cft(of)->private)) {
3860         case RES_LIMIT:
3861                 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3862                         ret = -EINVAL;
3863                         break;
3864                 }
3865                 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3866                 case _MEM:
3867                         ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3868                         break;
3869                 case _MEMSWAP:
3870                         ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3871                         break;
3872                 case _KMEM:
3873                         pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3874                                      "Writing any value to this file has no effect. "
3875                                      "Please report your usecase to linux-mm@kvack.org if you "
3876                                      "depend on this functionality.\n");
3877                         ret = 0;
3878                         break;
3879                 case _TCP:
3880                         ret = memcg_update_tcp_max(memcg, nr_pages);
3881                         break;
3882                 }
3883                 break;
3884         case RES_SOFT_LIMIT:
3885                 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
3886                         ret = -EOPNOTSUPP;
3887                 } else {
3888                         WRITE_ONCE(memcg->soft_limit, nr_pages);
3889                         ret = 0;
3890                 }
3891                 break;
3892         }
3893         return ret ?: nbytes;
3894 }
3895
3896 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3897                                 size_t nbytes, loff_t off)
3898 {
3899         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3900         struct page_counter *counter;
3901
3902         switch (MEMFILE_TYPE(of_cft(of)->private)) {
3903         case _MEM:
3904                 counter = &memcg->memory;
3905                 break;
3906         case _MEMSWAP:
3907                 counter = &memcg->memsw;
3908                 break;
3909         case _KMEM:
3910                 counter = &memcg->kmem;
3911                 break;
3912         case _TCP:
3913                 counter = &memcg->tcpmem;
3914                 break;
3915         default:
3916                 BUG();
3917         }
3918
3919         switch (MEMFILE_ATTR(of_cft(of)->private)) {
3920         case RES_MAX_USAGE:
3921                 page_counter_reset_watermark(counter);
3922                 break;
3923         case RES_FAILCNT:
3924                 counter->failcnt = 0;
3925                 break;
3926         default:
3927                 BUG();
3928         }
3929
3930         return nbytes;
3931 }
3932
3933 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3934                                         struct cftype *cft)
3935 {
3936         return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3937 }
3938
3939 #ifdef CONFIG_MMU
3940 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3941                                         struct cftype *cft, u64 val)
3942 {
3943         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3944
3945         pr_warn_once("Cgroup memory moving (move_charge_at_immigrate) is deprecated. "
3946                      "Please report your usecase to linux-mm@kvack.org if you "
3947                      "depend on this functionality.\n");
3948
3949         if (val & ~MOVE_MASK)
3950                 return -EINVAL;
3951
3952         /*
3953          * No kind of locking is needed in here, because ->can_attach() will
3954          * check this value once at the beginning of the process, and then carry
3955          * on with stale data. This means that changes to this value will only
3956          * affect task migrations starting after the change.
3957          */
3958         memcg->move_charge_at_immigrate = val;
3959         return 0;
3960 }
3961 #else
3962 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3963                                         struct cftype *cft, u64 val)
3964 {
3965         return -ENOSYS;
3966 }
3967 #endif
3968
3969 #ifdef CONFIG_NUMA
3970
3971 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3972 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3973 #define LRU_ALL      ((1 << NR_LRU_LISTS) - 1)
3974
3975 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3976                                 int nid, unsigned int lru_mask, bool tree)
3977 {
3978         struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3979         unsigned long nr = 0;
3980         enum lru_list lru;
3981
3982         VM_BUG_ON((unsigned)nid >= nr_node_ids);
3983
3984         for_each_lru(lru) {
3985                 if (!(BIT(lru) & lru_mask))
3986                         continue;
3987                 if (tree)
3988                         nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3989                 else
3990                         nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3991         }
3992         return nr;
3993 }
3994
3995 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3996                                              unsigned int lru_mask,
3997                                              bool tree)
3998 {
3999         unsigned long nr = 0;
4000         enum lru_list lru;
4001
4002         for_each_lru(lru) {
4003                 if (!(BIT(lru) & lru_mask))
4004                         continue;
4005                 if (tree)
4006                         nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
4007                 else
4008                         nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
4009         }
4010         return nr;
4011 }
4012
4013 static int memcg_numa_stat_show(struct seq_file *m, void *v)
4014 {
4015         struct numa_stat {
4016                 const char *name;
4017                 unsigned int lru_mask;
4018         };
4019
4020         static const struct numa_stat stats[] = {
4021                 { "total", LRU_ALL },
4022                 { "file", LRU_ALL_FILE },
4023                 { "anon", LRU_ALL_ANON },
4024                 { "unevictable", BIT(LRU_UNEVICTABLE) },
4025         };
4026         const struct numa_stat *stat;
4027         int nid;
4028         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4029
4030         mem_cgroup_flush_stats();
4031
4032         for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4033                 seq_printf(m, "%s=%lu", stat->name,
4034                            mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4035                                                    false));
4036                 for_each_node_state(nid, N_MEMORY)
4037                         seq_printf(m, " N%d=%lu", nid,
4038                                    mem_cgroup_node_nr_lru_pages(memcg, nid,
4039                                                         stat->lru_mask, false));
4040                 seq_putc(m, '\n');
4041         }
4042
4043         for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
4044
4045                 seq_printf(m, "hierarchical_%s=%lu", stat->name,
4046                            mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
4047                                                    true));
4048                 for_each_node_state(nid, N_MEMORY)
4049                         seq_printf(m, " N%d=%lu", nid,
4050                                    mem_cgroup_node_nr_lru_pages(memcg, nid,
4051                                                         stat->lru_mask, true));
4052                 seq_putc(m, '\n');
4053         }
4054
4055         return 0;
4056 }
4057 #endif /* CONFIG_NUMA */
4058
4059 static const unsigned int memcg1_stats[] = {
4060         NR_FILE_PAGES,
4061         NR_ANON_MAPPED,
4062 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4063         NR_ANON_THPS,
4064 #endif
4065         NR_SHMEM,
4066         NR_FILE_MAPPED,
4067         NR_FILE_DIRTY,
4068         NR_WRITEBACK,
4069         WORKINGSET_REFAULT_ANON,
4070         WORKINGSET_REFAULT_FILE,
4071 #ifdef CONFIG_SWAP
4072         MEMCG_SWAP,
4073         NR_SWAPCACHE,
4074 #endif
4075 };
4076
4077 static const char *const memcg1_stat_names[] = {
4078         "cache",
4079         "rss",
4080 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4081         "rss_huge",
4082 #endif
4083         "shmem",
4084         "mapped_file",
4085         "dirty",
4086         "writeback",
4087         "workingset_refault_anon",
4088         "workingset_refault_file",
4089 #ifdef CONFIG_SWAP
4090         "swap",
4091         "swapcached",
4092 #endif
4093 };
4094
4095 /* Universal VM events cgroup1 shows, original sort order */
4096 static const unsigned int memcg1_events[] = {
4097         PGPGIN,
4098         PGPGOUT,
4099         PGFAULT,
4100         PGMAJFAULT,
4101 };
4102
4103 static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s)
4104 {
4105         unsigned long memory, memsw;
4106         struct mem_cgroup *mi;
4107         unsigned int i;
4108
4109         BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4110
4111         mem_cgroup_flush_stats();
4112
4113         for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4114                 unsigned long nr;
4115
4116                 nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4117                 seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i],
4118                            nr * memcg_page_state_unit(memcg1_stats[i]));
4119         }
4120
4121         for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4122                 seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]),
4123                                memcg_events_local(memcg, memcg1_events[i]));
4124
4125         for (i = 0; i < NR_LRU_LISTS; i++)
4126                 seq_buf_printf(s, "%s %lu\n", lru_list_name(i),
4127                                memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4128                                PAGE_SIZE);
4129
4130         /* Hierarchical information */
4131         memory = memsw = PAGE_COUNTER_MAX;
4132         for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4133                 memory = min(memory, READ_ONCE(mi->memory.max));
4134                 memsw = min(memsw, READ_ONCE(mi->memsw.max));
4135         }
4136         seq_buf_printf(s, "hierarchical_memory_limit %llu\n",
4137                        (u64)memory * PAGE_SIZE);
4138         seq_buf_printf(s, "hierarchical_memsw_limit %llu\n",
4139                        (u64)memsw * PAGE_SIZE);
4140
4141         for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4142                 unsigned long nr;
4143
4144                 nr = memcg_page_state(memcg, memcg1_stats[i]);
4145                 seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i],
4146                            (u64)nr * memcg_page_state_unit(memcg1_stats[i]));
4147         }
4148
4149         for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4150                 seq_buf_printf(s, "total_%s %llu\n",
4151                                vm_event_name(memcg1_events[i]),
4152                                (u64)memcg_events(memcg, memcg1_events[i]));
4153
4154         for (i = 0; i < NR_LRU_LISTS; i++)
4155                 seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i),
4156                                (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4157                                PAGE_SIZE);
4158
4159 #ifdef CONFIG_DEBUG_VM
4160         {
4161                 pg_data_t *pgdat;
4162                 struct mem_cgroup_per_node *mz;
4163                 unsigned long anon_cost = 0;
4164                 unsigned long file_cost = 0;
4165
4166                 for_each_online_pgdat(pgdat) {
4167                         mz = memcg->nodeinfo[pgdat->node_id];
4168
4169                         anon_cost += mz->lruvec.anon_cost;
4170                         file_cost += mz->lruvec.file_cost;
4171                 }
4172                 seq_buf_printf(s, "anon_cost %lu\n", anon_cost);
4173                 seq_buf_printf(s, "file_cost %lu\n", file_cost);
4174         }
4175 #endif
4176 }
4177
4178 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4179                                       struct cftype *cft)
4180 {
4181         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4182
4183         return mem_cgroup_swappiness(memcg);
4184 }
4185
4186 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4187                                        struct cftype *cft, u64 val)
4188 {
4189         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4190
4191         if (val > 200)
4192                 return -EINVAL;
4193
4194         if (!mem_cgroup_is_root(memcg))
4195                 WRITE_ONCE(memcg->swappiness, val);
4196         else
4197                 WRITE_ONCE(vm_swappiness, val);
4198
4199         return 0;
4200 }
4201
4202 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4203 {
4204         struct mem_cgroup_threshold_ary *t;
4205         unsigned long usage;
4206         int i;
4207
4208         rcu_read_lock();
4209         if (!swap)
4210                 t = rcu_dereference(memcg->thresholds.primary);
4211         else
4212                 t = rcu_dereference(memcg->memsw_thresholds.primary);
4213
4214         if (!t)
4215                 goto unlock;
4216
4217         usage = mem_cgroup_usage(memcg, swap);
4218
4219         /*
4220          * current_threshold points to the threshold just below or equal to usage.
4221          * If that is not the case, a threshold was crossed after the last
4222          * call of __mem_cgroup_threshold().
4223          */
4224         i = t->current_threshold;
4225
4226         /*
4227          * Iterate backward over the array of thresholds starting from
4228          * current_threshold and check if a threshold is crossed.
4229          * If none of the thresholds below usage is crossed, we read
4230          * only one element of the array here.
4231          */
4232         for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4233                 eventfd_signal(t->entries[i].eventfd, 1);
4234
4235         /* i = current_threshold + 1 */
4236         i++;
4237
4238         /*
4239          * Iterate forward over the array of thresholds starting from
4240          * current_threshold+1 and check if a threshold is crossed.
4241          * If none of the thresholds above usage is crossed, we read
4242          * only one element of the array here.
4243          */
4244         for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4245                 eventfd_signal(t->entries[i].eventfd, 1);
4246
4247         /* Update current_threshold */
4248         t->current_threshold = i - 1;
4249 unlock:
4250         rcu_read_unlock();
4251 }
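
/*
 * Worked example (illustrative numbers): with thresholds of 4M, 8M and
 * 16M registered and a previous usage of 10M, current_threshold is 1 (the
 * 8M entry). If usage rises to 20M, the forward scan signals the 16M
 * eventfd and current_threshold becomes 2. If usage instead drops to 3M,
 * the backward scan signals the 8M and 4M eventfds and current_threshold
 * becomes -1. Thresholds therefore fire on every crossing, in either
 * direction.
 */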
4252
4253 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4254 {
4255         while (memcg) {
4256                 __mem_cgroup_threshold(memcg, false);
4257                 if (do_memsw_account())
4258                         __mem_cgroup_threshold(memcg, true);
4259
4260                 memcg = parent_mem_cgroup(memcg);
4261         }
4262 }
4263
4264 static int compare_thresholds(const void *a, const void *b)
4265 {
4266         const struct mem_cgroup_threshold *_a = a;
4267         const struct mem_cgroup_threshold *_b = b;
4268
4269         if (_a->threshold > _b->threshold)
4270                 return 1;
4271
4272         if (_a->threshold < _b->threshold)
4273                 return -1;
4274
4275         return 0;
4276 }
4277
4278 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4279 {
4280         struct mem_cgroup_eventfd_list *ev;
4281
4282         spin_lock(&memcg_oom_lock);
4283
4284         list_for_each_entry(ev, &memcg->oom_notify, list)
4285                 eventfd_signal(ev->eventfd, 1);
4286
4287         spin_unlock(&memcg_oom_lock);
4288         return 0;
4289 }
4290
4291 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4292 {
4293         struct mem_cgroup *iter;
4294
4295         for_each_mem_cgroup_tree(iter, memcg)
4296                 mem_cgroup_oom_notify_cb(iter);
4297 }
4298
4299 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4300         struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4301 {
4302         struct mem_cgroup_thresholds *thresholds;
4303         struct mem_cgroup_threshold_ary *new;
4304         unsigned long threshold;
4305         unsigned long usage;
4306         int i, size, ret;
4307
4308         ret = page_counter_memparse(args, "-1", &threshold);
4309         if (ret)
4310                 return ret;
4311
4312         mutex_lock(&memcg->thresholds_lock);
4313
4314         if (type == _MEM) {
4315                 thresholds = &memcg->thresholds;
4316                 usage = mem_cgroup_usage(memcg, false);
4317         } else if (type == _MEMSWAP) {
4318                 thresholds = &memcg->memsw_thresholds;
4319                 usage = mem_cgroup_usage(memcg, true);
4320         } else
4321                 BUG();
4322
4323         /* Check if a threshold was crossed before adding a new one */
4324         if (thresholds->primary)
4325                 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4326
4327         size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4328
4329         /* Allocate memory for new array of thresholds */
4330         new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4331         if (!new) {
4332                 ret = -ENOMEM;
4333                 goto unlock;
4334         }
4335         new->size = size;
4336
4337         /* Copy thresholds (if any) to new array */
4338         if (thresholds->primary)
4339                 memcpy(new->entries, thresholds->primary->entries,
4340                        flex_array_size(new, entries, size - 1));
4341
4342         /* Add new threshold */
4343         new->entries[size - 1].eventfd = eventfd;
4344         new->entries[size - 1].threshold = threshold;
4345
4346         /* Sort thresholds. Registering of new threshold isn't time-critical */
4347         sort(new->entries, size, sizeof(*new->entries),
4348                         compare_thresholds, NULL);
4349
4350         /* Find current threshold */
4351         new->current_threshold = -1;
4352         for (i = 0; i < size; i++) {
4353                 if (new->entries[i].threshold <= usage) {
4354                         /*
4355                          * new->current_threshold will not be used until
4356                          * rcu_assign_pointer(), so it's safe to increment
4357                          * it here.
4358                          */
4359                         ++new->current_threshold;
4360                 } else
4361                         break;
4362         }
4363
4364         /* Free old spare buffer and save old primary buffer as spare */
4365         kfree(thresholds->spare);
4366         thresholds->spare = thresholds->primary;
4367
4368         rcu_assign_pointer(thresholds->primary, new);
4369
4370         /* To be sure that nobody uses thresholds */
4371         synchronize_rcu();
4372
4373 unlock:
4374         mutex_unlock(&memcg->thresholds_lock);
4375
4376         return ret;
4377 }
4378
4379 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4380         struct eventfd_ctx *eventfd, const char *args)
4381 {
4382         return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4383 }
4384
4385 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4386         struct eventfd_ctx *eventfd, const char *args)
4387 {
4388         return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4389 }
4390
4391 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4392         struct eventfd_ctx *eventfd, enum res_type type)
4393 {
4394         struct mem_cgroup_thresholds *thresholds;
4395         struct mem_cgroup_threshold_ary *new;
4396         unsigned long usage;
4397         int i, j, size, entries;
4398
4399         mutex_lock(&memcg->thresholds_lock);
4400
4401         if (type == _MEM) {
4402                 thresholds = &memcg->thresholds;
4403                 usage = mem_cgroup_usage(memcg, false);
4404         } else if (type == _MEMSWAP) {
4405                 thresholds = &memcg->memsw_thresholds;
4406                 usage = mem_cgroup_usage(memcg, true);
4407         } else
4408                 BUG();
4409
4410         if (!thresholds->primary)
4411                 goto unlock;
4412
4413         /* Check if a threshold was crossed before removing */
4414         __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4415
4416         /* Calculate the new number of thresholds */
4417         size = entries = 0;
4418         for (i = 0; i < thresholds->primary->size; i++) {
4419                 if (thresholds->primary->entries[i].eventfd != eventfd)
4420                         size++;
4421                 else
4422                         entries++;
4423         }
4424
4425         new = thresholds->spare;
4426
4427         /* If no items related to eventfd have been cleared, nothing to do */
4428         if (!entries)
4429                 goto unlock;
4430
4431         /* Set thresholds array to NULL if we don't have thresholds */
4432         if (!size) {
4433                 kfree(new);
4434                 new = NULL;
4435                 goto swap_buffers;
4436         }
4437
4438         new->size = size;
4439
4440         /* Copy thresholds and find current threshold */
4441         new->current_threshold = -1;
4442         for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4443                 if (thresholds->primary->entries[i].eventfd == eventfd)
4444                         continue;
4445
4446                 new->entries[j] = thresholds->primary->entries[i];
4447                 if (new->entries[j].threshold <= usage) {
4448                         /*
4449                          * new->current_threshold will not be used
4450                          * until rcu_assign_pointer(), so it's safe to increment
4451                          * it here.
4452                          */
4453                         ++new->current_threshold;
4454                 }
4455                 j++;
4456         }
4457
4458 swap_buffers:
4459         /* Swap primary and spare array */
4460         thresholds->spare = thresholds->primary;
4461
4462         rcu_assign_pointer(thresholds->primary, new);
4463
4464         /* To be sure that nobody uses thresholds */
4465         synchronize_rcu();
4466
4467         /* If all events are unregistered, free the spare array */
4468         if (!new) {
4469                 kfree(thresholds->spare);
4470                 thresholds->spare = NULL;
4471         }
4472 unlock:
4473         mutex_unlock(&memcg->thresholds_lock);
4474 }
4475
4476 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4477         struct eventfd_ctx *eventfd)
4478 {
4479         return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4480 }
4481
4482 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4483         struct eventfd_ctx *eventfd)
4484 {
4485         return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4486 }
4487
4488 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4489         struct eventfd_ctx *eventfd, const char *args)
4490 {
4491         struct mem_cgroup_eventfd_list *event;
4492
4493         event = kmalloc(sizeof(*event), GFP_KERNEL);
4494         if (!event)
4495                 return -ENOMEM;
4496
4497         spin_lock(&memcg_oom_lock);
4498
4499         event->eventfd = eventfd;
4500         list_add(&event->list, &memcg->oom_notify);
4501
4502         /* already in OOM ? */
4503         if (memcg->under_oom)
4504                 eventfd_signal(eventfd, 1);
4505         spin_unlock(&memcg_oom_lock);
4506
4507         return 0;
4508 }
4509
4510 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4511         struct eventfd_ctx *eventfd)
4512 {
4513         struct mem_cgroup_eventfd_list *ev, *tmp;
4514
4515         spin_lock(&memcg_oom_lock);
4516
4517         list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4518                 if (ev->eventfd == eventfd) {
4519                         list_del(&ev->list);
4520                         kfree(ev);
4521                 }
4522         }
4523
4524         spin_unlock(&memcg_oom_lock);
4525 }
4526
4527 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4528 {
4529         struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4530
4531         seq_printf(sf, "oom_kill_disable %d\n", READ_ONCE(memcg->oom_kill_disable));
4532         seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4533         seq_printf(sf, "oom_kill %lu\n",
4534                    atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4535         return 0;
4536 }
4537
4538 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4539         struct cftype *cft, u64 val)
4540 {
4541         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4542
4543         /* cannot set to root cgroup and only 0 and 1 are allowed */
4544         if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4545                 return -EINVAL;
4546
4547         WRITE_ONCE(memcg->oom_kill_disable, val);
4548         if (!val)
4549                 memcg_oom_recover(memcg);
4550
4551         return 0;
4552 }
4553
4554 #ifdef CONFIG_CGROUP_WRITEBACK
4555
4556 #include <trace/events/writeback.h>
4557
4558 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4559 {
4560         return wb_domain_init(&memcg->cgwb_domain, gfp);
4561 }
4562
4563 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4564 {
4565         wb_domain_exit(&memcg->cgwb_domain);
4566 }
4567
4568 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4569 {
4570         wb_domain_size_changed(&memcg->cgwb_domain);
4571 }
4572
4573 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4574 {
4575         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4576
4577         if (!memcg->css.parent)
4578                 return NULL;
4579
4580         return &memcg->cgwb_domain;
4581 }
4582
4583 /**
4584  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4585  * @wb: bdi_writeback in question
4586  * @pfilepages: out parameter for number of file pages
4587  * @pheadroom: out parameter for number of allocatable pages according to memcg
4588  * @pdirty: out parameter for number of dirty pages
4589  * @pwriteback: out parameter for number of pages under writeback
4590  *
4591  * Determine the numbers of file, headroom, dirty, and writeback pages in
4592  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4593  * is a bit more involved.
4594  *
4595  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4596  * headroom is calculated as the lowest headroom of itself and the
4597  * ancestors.  Note that this doesn't consider the actual amount of
4598  * available memory in the system.  The caller should further cap
4599  * *@pheadroom accordingly.
4600  */
4601 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4602                          unsigned long *pheadroom, unsigned long *pdirty,
4603                          unsigned long *pwriteback)
4604 {
4605         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4606         struct mem_cgroup *parent;
4607
4608         mem_cgroup_flush_stats();
4609
4610         *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4611         *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4612         *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4613                         memcg_page_state(memcg, NR_ACTIVE_FILE);
4614
4615         *pheadroom = PAGE_COUNTER_MAX;
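        /*
         * Walk up the hierarchy and clamp *pheadroom to the smallest
         * "ceiling - used" seen along the way: an ancestor close to its
         * limit constrains every descendant below it.
         */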
4616         while ((parent = parent_mem_cgroup(memcg))) {
4617                 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4618                                             READ_ONCE(memcg->memory.high));
4619                 unsigned long used = page_counter_read(&memcg->memory);
4620
4621                 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4622                 memcg = parent;
4623         }
4624 }
4625
4626 /*
4627  * Foreign dirty flushing
4628  *
4629  * There's an inherent mismatch between memcg and writeback.  The former
4630  * tracks ownership per-page while the latter per-inode.  This was a
4631  * deliberate design decision because honoring per-page ownership in the
4632  * writeback path is complicated, may lead to higher CPU and IO overheads
4633  * and deemed unnecessary given that write-sharing an inode across
4634  * different cgroups isn't a common use-case.
4635  *
4636  * Combined with inode majority-writer ownership switching, this works well
4637  * enough in most cases but there are some pathological cases.  For
4638  * example, let's say there are two cgroups A and B which keep writing to
4639  * different but confined parts of the same inode.  B owns the inode and
4640  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4641  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4642  * triggering background writeback.  A will be slowed down without a way to
4643  * make writeback of the dirty pages happen.
4644  *
4645  * Conditions like the above can lead to a cgroup getting repeatedly and
4646  * severely throttled after making some progress after each
4647  * dirty_expire_interval while the underlying IO device is almost
4648  * completely idle.
4649  *
4650  * Solving this problem completely requires matching the ownership tracking
4651  * granularities between memcg and writeback in either direction.  However,
4652  * the more egregious behaviors can be avoided by simply remembering the
4653  * most recent foreign dirtying events and initiating remote flushes on
4654  * them when local writeback isn't enough to keep the memory clean enough.
4655  *
4656  * The following two functions implement such mechanism.  When a foreign
4657  * page - a page whose memcg and writeback ownerships don't match - is
4658  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4659  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4660  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4661  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4662  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4663  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4664  * limited to MEMCG_CGWB_FRN_CNT.
4665  *
4666  * The mechanism only remembers IDs and doesn't hold any object references.
4667  * As being wrong occasionally doesn't matter, updates and accesses to the
4668  * records are lockless and racy.
4669  */
4670 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4671                                              struct bdi_writeback *wb)
4672 {
4673         struct mem_cgroup *memcg = folio_memcg(folio);
4674         struct memcg_cgwb_frn *frn;
4675         u64 now = get_jiffies_64();
4676         u64 oldest_at = now;
4677         int oldest = -1;
4678         int i;
4679
4680         trace_track_foreign_dirty(folio, wb);
4681
4682         /*
4683          * Pick the slot to use.  If there is already a slot for @wb, keep
4684          * using it.  If not, replace the oldest one which isn't being
4685          * written out.
4686          */
4687         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4688                 frn = &memcg->cgwb_frn[i];
4689                 if (frn->bdi_id == wb->bdi->id &&
4690                     frn->memcg_id == wb->memcg_css->id)
4691                         break;
4692                 if (time_before64(frn->at, oldest_at) &&
4693                     atomic_read(&frn->done.cnt) == 1) {
4694                         oldest = i;
4695                         oldest_at = frn->at;
4696                 }
4697         }
4698
4699         if (i < MEMCG_CGWB_FRN_CNT) {
4700                 /*
4701                  * Re-using an existing one.  Update timestamp lazily to
4702                  * avoid making the cacheline hot.  We want them to be
4703                  * reasonably up-to-date and significantly shorter than
4704                  * dirty_expire_interval as that's what expires the record.
4705                  * Use the shorter of 1s and dirty_expire_interval / 8.
4706                  */
4707                 unsigned long update_intv =
4708                         min_t(unsigned long, HZ,
4709                               msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4710
4711                 if (time_before64(frn->at, now - update_intv))
4712                         frn->at = now;
4713         } else if (oldest >= 0) {
4714                 /* replace the oldest free one */
4715                 frn = &memcg->cgwb_frn[oldest];
4716                 frn->bdi_id = wb->bdi->id;
4717                 frn->memcg_id = wb->memcg_css->id;
4718                 frn->at = now;
4719         }
4720 }
4721
4722 /* issue foreign writeback flushes for recorded foreign dirtying events */
4723 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4724 {
4725         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4726         unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4727         u64 now = jiffies_64;
4728         int i;
4729
4730         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4731                 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4732
4733                 /*
4734                  * If the record is older than dirty_expire_interval,
4735                  * writeback on it has already started.  No need to kick it
4736                  * off again.  Also, don't start a new one if there's
4737                  * already one in flight.
4738                  */
4739                 if (time_after64(frn->at, now - intv) &&
4740                     atomic_read(&frn->done.cnt) == 1) {
4741                         frn->at = 0;
4742                         trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4743                         cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4744                                                WB_REASON_FOREIGN_FLUSH,
4745                                                &frn->done);
4746                 }
4747         }
4748 }
4749
4750 #else   /* CONFIG_CGROUP_WRITEBACK */
4751
4752 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4753 {
4754         return 0;
4755 }
4756
4757 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4758 {
4759 }
4760
4761 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4762 {
4763 }
4764
4765 #endif  /* CONFIG_CGROUP_WRITEBACK */
4766
4767 /*
4768  * DO NOT USE IN NEW FILES.
4769  *
4770  * "cgroup.event_control" implementation.
4771  *
4772  * This is way over-engineered.  It tries to support fully configurable
4773  * events for each user.  Such level of flexibility is completely
4774  * unnecessary especially in the light of the planned unified hierarchy.
4775  *
4776  * Please deprecate this and replace with something simpler if at all
4777  * possible.
4778  */
4779
4780 /*
4781  * Unregister event and free resources.
4782  *
4783  * Gets called from workqueue.
4784  */
4785 static void memcg_event_remove(struct work_struct *work)
4786 {
4787         struct mem_cgroup_event *event =
4788                 container_of(work, struct mem_cgroup_event, remove);
4789         struct mem_cgroup *memcg = event->memcg;
4790
4791         remove_wait_queue(event->wqh, &event->wait);
4792
4793         event->unregister_event(memcg, event->eventfd);
4794
4795         /* Notify userspace the event is going away. */
4796         eventfd_signal(event->eventfd, 1);
4797
4798         eventfd_ctx_put(event->eventfd);
4799         kfree(event);
4800         css_put(&memcg->css);
4801 }
4802
4803 /*
4804  * Gets called on EPOLLHUP on eventfd when user closes it.
4805  *
4806  * Called with wqh->lock held and interrupts disabled.
4807  */
4808 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4809                             int sync, void *key)
4810 {
4811         struct mem_cgroup_event *event =
4812                 container_of(wait, struct mem_cgroup_event, wait);
4813         struct mem_cgroup *memcg = event->memcg;
4814         __poll_t flags = key_to_poll(key);
4815
4816         if (flags & EPOLLHUP) {
4817                 /*
4818                  * If the event has been detached at cgroup removal, we
4819                  * can simply return knowing the other side will clean up
4820                  * for us.
4821                  *
4822                  * We can't race against event freeing since the other
4823                  * side will require wqh->lock via remove_wait_queue(),
4824                  * which we hold.
4825                  */
4826                 spin_lock(&memcg->event_list_lock);
4827                 if (!list_empty(&event->list)) {
4828                         list_del_init(&event->list);
4829                         /*
4830                          * We are in atomic context, but memcg_event_remove()
4831                          * may sleep, so we have to call it from a workqueue.
4832                          */
4833                         schedule_work(&event->remove);
4834                 }
4835                 spin_unlock(&memcg->event_list_lock);
4836         }
4837
4838         return 0;
4839 }
4840
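/*
 * Poll-table callback invoked via vfs_poll(): remember the eventfd's wait
 * queue head and add our wait entry to it, so that memcg_event_wake() runs
 * when the eventfd is signalled or closed.
 */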
4841 static void memcg_event_ptable_queue_proc(struct file *file,
4842                 wait_queue_head_t *wqh, poll_table *pt)
4843 {
4844         struct mem_cgroup_event *event =
4845                 container_of(pt, struct mem_cgroup_event, pt);
4846
4847         event->wqh = wqh;
4848         add_wait_queue(wqh, &event->wait);
4849 }
4850
4851 /*
4852  * DO NOT USE IN NEW FILES.
4853  *
4854  * Parse input and register new cgroup event handler.
4855  *
4856  * Input must be in format '<event_fd> <control_fd> <args>'.
4857  * Interpretation of args is defined by control file implementation.
4858  */
4859 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4860                                          char *buf, size_t nbytes, loff_t off)
4861 {
4862         struct cgroup_subsys_state *css = of_css(of);
4863         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4864         struct mem_cgroup_event *event;
4865         struct cgroup_subsys_state *cfile_css;
4866         unsigned int efd, cfd;
4867         struct fd efile;
4868         struct fd cfile;
4869         struct dentry *cdentry;
4870         const char *name;
4871         char *endp;
4872         int ret;
4873
4874         if (IS_ENABLED(CONFIG_PREEMPT_RT))
4875                 return -EOPNOTSUPP;
4876
4877         buf = strstrip(buf);
4878
4879         efd = simple_strtoul(buf, &endp, 10);
4880         if (*endp != ' ')
4881                 return -EINVAL;
4882         buf = endp + 1;
4883
4884         cfd = simple_strtoul(buf, &endp, 10);
4885         if ((*endp != ' ') && (*endp != '\0'))
4886                 return -EINVAL;
4887         buf = endp + 1;
4888
4889         event = kzalloc(sizeof(*event), GFP_KERNEL);
4890         if (!event)
4891                 return -ENOMEM;
4892
4893         event->memcg = memcg;
4894         INIT_LIST_HEAD(&event->list);
4895         init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4896         init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4897         INIT_WORK(&event->remove, memcg_event_remove);
4898
4899         efile = fdget(efd);
4900         if (!efile.file) {
4901                 ret = -EBADF;
4902                 goto out_kfree;
4903         }
4904
4905         event->eventfd = eventfd_ctx_fileget(efile.file);
4906         if (IS_ERR(event->eventfd)) {
4907                 ret = PTR_ERR(event->eventfd);
4908                 goto out_put_efile;
4909         }
4910
4911         cfile = fdget(cfd);
4912         if (!cfile.file) {
4913                 ret = -EBADF;
4914                 goto out_put_eventfd;
4915         }
4916
4917         /* the process needs read permission on the control file */
4918         /* AV: shouldn't we check that it's been opened for read instead? */
4919         ret = file_permission(cfile.file, MAY_READ);
4920         if (ret < 0)
4921                 goto out_put_cfile;
4922
4923         /*
4924          * The control file must be a regular cgroup1 file. As a regular cgroup
4925          * file can't be renamed, it's safe to access its name afterwards.
4926          */
4927         cdentry = cfile.file->f_path.dentry;
4928         if (cdentry->d_sb->s_type != &cgroup_fs_type || !d_is_reg(cdentry)) {
4929                 ret = -EINVAL;
4930                 goto out_put_cfile;
4931         }
4932
4933         /*
4934          * Determine the event callbacks and set them in @event.  This used
4935          * to be done via struct cftype but cgroup core no longer knows
4936          * about these events.  The following is crude but the whole thing
4937          * is for compatibility anyway.
4938          *
4939          * DO NOT ADD NEW FILES.
4940          */
4941         name = cdentry->d_name.name;
4942
4943         if (!strcmp(name, "memory.usage_in_bytes")) {
4944                 event->register_event = mem_cgroup_usage_register_event;
4945                 event->unregister_event = mem_cgroup_usage_unregister_event;
4946         } else if (!strcmp(name, "memory.oom_control")) {
4947                 event->register_event = mem_cgroup_oom_register_event;
4948                 event->unregister_event = mem_cgroup_oom_unregister_event;
4949         } else if (!strcmp(name, "memory.pressure_level")) {
4950                 event->register_event = vmpressure_register_event;
4951                 event->unregister_event = vmpressure_unregister_event;
4952         } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4953                 event->register_event = memsw_cgroup_usage_register_event;
4954                 event->unregister_event = memsw_cgroup_usage_unregister_event;
4955         } else {
4956                 ret = -EINVAL;
4957                 goto out_put_cfile;
4958         }
4959
4960         /*
4961          * Verify that @cfile belongs to @css.  Also, remaining events are
4962          * automatically removed on cgroup destruction but the removal is
4963          * asynchronous, so take an extra ref on @css.
4964          */
4965         cfile_css = css_tryget_online_from_dir(cdentry->d_parent,
4966                                                &memory_cgrp_subsys);
4967         ret = -EINVAL;
4968         if (IS_ERR(cfile_css))
4969                 goto out_put_cfile;
4970         if (cfile_css != css) {
4971                 css_put(cfile_css);
4972                 goto out_put_cfile;
4973         }
4974
4975         ret = event->register_event(memcg, event->eventfd, buf);
4976         if (ret)
4977                 goto out_put_css;
4978
4979         vfs_poll(efile.file, &event->pt);
4980
4981         spin_lock_irq(&memcg->event_list_lock);
4982         list_add(&event->list, &memcg->event_list);
4983         spin_unlock_irq(&memcg->event_list_lock);
4984
4985         fdput(cfile);
4986         fdput(efile);
4987
4988         return nbytes;
4989
4990 out_put_css:
4991         css_put(css);
4992 out_put_cfile:
4993         fdput(cfile);
4994 out_put_eventfd:
4995         eventfd_ctx_put(event->eventfd);
4996 out_put_efile:
4997         fdput(efile);
4998 out_kfree:
4999         kfree(event);
5000
5001         return ret;
5002 }
5003
5004 #if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5005 static int mem_cgroup_slab_show(struct seq_file *m, void *p)
5006 {
5007         /*
5008          * Deprecated.
5009          * Please, take a look at tools/cgroup/memcg_slabinfo.py .
5010          */
5011         return 0;
5012 }
5013 #endif
5014
5015 static int memory_stat_show(struct seq_file *m, void *v);
5016
5017 static struct cftype mem_cgroup_legacy_files[] = {
5018         {
5019                 .name = "usage_in_bytes",
5020                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
5021                 .read_u64 = mem_cgroup_read_u64,
5022         },
5023         {
5024                 .name = "max_usage_in_bytes",
5025                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
5026                 .write = mem_cgroup_reset,
5027                 .read_u64 = mem_cgroup_read_u64,
5028         },
5029         {
5030                 .name = "limit_in_bytes",
5031                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
5032                 .write = mem_cgroup_write,
5033                 .read_u64 = mem_cgroup_read_u64,
5034         },
5035         {
5036                 .name = "soft_limit_in_bytes",
5037                 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
5038                 .write = mem_cgroup_write,
5039                 .read_u64 = mem_cgroup_read_u64,
5040         },
5041         {
5042                 .name = "failcnt",
5043                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
5044                 .write = mem_cgroup_reset,
5045                 .read_u64 = mem_cgroup_read_u64,
5046         },
5047         {
5048                 .name = "stat",
5049                 .seq_show = memory_stat_show,
5050         },
5051         {
5052                 .name = "force_empty",
5053                 .write = mem_cgroup_force_empty_write,
5054         },
5055         {
5056                 .name = "use_hierarchy",
5057                 .write_u64 = mem_cgroup_hierarchy_write,
5058                 .read_u64 = mem_cgroup_hierarchy_read,
5059         },
5060         {
5061                 .name = "cgroup.event_control",         /* XXX: for compat */
5062                 .write = memcg_write_event_control,
5063                 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
5064         },
5065         {
5066                 .name = "swappiness",
5067                 .read_u64 = mem_cgroup_swappiness_read,
5068                 .write_u64 = mem_cgroup_swappiness_write,
5069         },
5070         {
5071                 .name = "move_charge_at_immigrate",
5072                 .read_u64 = mem_cgroup_move_charge_read,
5073                 .write_u64 = mem_cgroup_move_charge_write,
5074         },
5075         {
5076                 .name = "oom_control",
5077                 .seq_show = mem_cgroup_oom_control_read,
5078                 .write_u64 = mem_cgroup_oom_control_write,
5079         },
5080         {
5081                 .name = "pressure_level",
5082                 .seq_show = mem_cgroup_dummy_seq_show,
5083         },
5084 #ifdef CONFIG_NUMA
5085         {
5086                 .name = "numa_stat",
5087                 .seq_show = memcg_numa_stat_show,
5088         },
5089 #endif
5090         {
5091                 .name = "kmem.limit_in_bytes",
5092                 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
5093                 .write = mem_cgroup_write,
5094                 .read_u64 = mem_cgroup_read_u64,
5095         },
5096         {
5097                 .name = "kmem.usage_in_bytes",
5098                 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
5099                 .read_u64 = mem_cgroup_read_u64,
5100         },
5101         {
5102                 .name = "kmem.failcnt",
5103                 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
5104                 .write = mem_cgroup_reset,
5105                 .read_u64 = mem_cgroup_read_u64,
5106         },
5107         {
5108                 .name = "kmem.max_usage_in_bytes",
5109                 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5110                 .write = mem_cgroup_reset,
5111                 .read_u64 = mem_cgroup_read_u64,
5112         },
5113 #if defined(CONFIG_MEMCG_KMEM) && \
5114         (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5115         {
5116                 .name = "kmem.slabinfo",
5117                 .seq_show = mem_cgroup_slab_show,
5118         },
5119 #endif
5120         {
5121                 .name = "kmem.tcp.limit_in_bytes",
5122                 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5123                 .write = mem_cgroup_write,
5124                 .read_u64 = mem_cgroup_read_u64,
5125         },
5126         {
5127                 .name = "kmem.tcp.usage_in_bytes",
5128                 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5129                 .read_u64 = mem_cgroup_read_u64,
5130         },
5131         {
5132                 .name = "kmem.tcp.failcnt",
5133                 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5134                 .write = mem_cgroup_reset,
5135                 .read_u64 = mem_cgroup_read_u64,
5136         },
5137         {
5138                 .name = "kmem.tcp.max_usage_in_bytes",
5139                 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5140                 .write = mem_cgroup_reset,
5141                 .read_u64 = mem_cgroup_read_u64,
5142         },
5143         { },    /* terminate */
5144 };
5145
5146 /*
5147  * Private memory cgroup IDR
5148  *
5149  * Swap-out records and page cache shadow entries need to store memcg
5150  * references in constrained space, so we maintain an ID space that is
5151  * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
5152  * memory-controlled cgroups to 64k.
5153  *
5154  * However, there usually are many references to the offline CSS after
5155  * the cgroup has been destroyed, such as page cache or reclaimable
5156  * slab objects, that don't need to hang on to the ID. We want to keep
5157  * those dead CSS from occupying IDs, or we might quickly exhaust the
5158  * relatively small ID space and prevent the creation of new cgroups
5159  * even when there are much fewer than 64k cgroups - possibly none.
5160  *
5161  * Maintain a private 16-bit ID space for memcg, and allow the ID to
5162  * be freed and recycled when it's no longer needed, which is usually
5163  * when the CSS is offlined.
5164  *
5165  * The only exception to that are records of swapped out tmpfs/shmem
5166  * pages that need to be attributed to live ancestors on swapin. But
5167  * those references are manageable from userspace.
5168  */
5169
5170 #define MEM_CGROUP_ID_MAX       ((1UL << MEM_CGROUP_ID_SHIFT) - 1)
5171 static DEFINE_IDR(mem_cgroup_idr);
5172
5173 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5174 {
5175         if (memcg->id.id > 0) {
5176                 idr_remove(&mem_cgroup_idr, memcg->id.id);
5177                 memcg->id.id = 0;
5178         }
5179 }
5180
5181 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5182                                                   unsigned int n)
5183 {
5184         refcount_add(n, &memcg->id.ref);
5185 }
5186
5187 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5188 {
5189         if (refcount_sub_and_test(n, &memcg->id.ref)) {
5190                 mem_cgroup_id_remove(memcg);
5191
5192                 /* Memcg ID pins CSS */
5193                 css_put(&memcg->css);
5194         }
5195 }
5196
5197 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5198 {
5199         mem_cgroup_id_put_many(memcg, 1);
5200 }
5201
5202 /**
5203  * mem_cgroup_from_id - look up a memcg from a memcg id
5204  * @id: the memcg id to look up
5205  *
5206  * Caller must hold rcu_read_lock().
5207  */
5208 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5209 {
5210         WARN_ON_ONCE(!rcu_read_lock_held());
5211         return idr_find(&mem_cgroup_idr, id);
5212 }
5213
5214 #ifdef CONFIG_SHRINKER_DEBUG
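/*
 * Look up the memcg owning the cgroup identified by @ino, taking a css
 * reference that the caller must drop; returns an ERR_PTR on failure.
 */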
5215 struct mem_cgroup *mem_cgroup_get_from_ino(unsigned long ino)
5216 {
5217         struct cgroup *cgrp;
5218         struct cgroup_subsys_state *css;
5219         struct mem_cgroup *memcg;
5220
5221         cgrp = cgroup_get_from_id(ino);
5222         if (IS_ERR(cgrp))
5223                 return ERR_CAST(cgrp);
5224
5225         css = cgroup_get_e_css(cgrp, &memory_cgrp_subsys);
5226         if (css)
5227                 memcg = container_of(css, struct mem_cgroup, css);
5228         else
5229                 memcg = ERR_PTR(-ENOENT);
5230
5231         cgroup_put(cgrp);
5232
5233         return memcg;
5234 }
5235 #endif
5236
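/*
 * Allocate and initialize @memcg's per-node state for @node: the node's
 * lruvec and its per-cpu lruvec statistics. Returns 0 on success and a
 * non-zero value if an allocation fails.
 */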
5237 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5238 {
5239         struct mem_cgroup_per_node *pn;
5240
5241         pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5242         if (!pn)
5243                 return 1;
5244
5245         pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5246                                                    GFP_KERNEL_ACCOUNT);
5247         if (!pn->lruvec_stats_percpu) {
5248                 kfree(pn);
5249                 return 1;
5250         }
5251
5252         lruvec_init(&pn->lruvec);
5253         pn->memcg = memcg;
5254
5255         memcg->nodeinfo[node] = pn;
5256         return 0;
5257 }
5258
5259 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5260 {
5261         struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5262
5263         if (!pn)
5264                 return;
5265
5266         free_percpu(pn->lruvec_stats_percpu);
5267         kfree(pn);
5268 }
5269
5270 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5271 {
5272         int node;
5273
5274         for_each_node(node)
5275                 free_mem_cgroup_per_node_info(memcg, node);
5276         kfree(memcg->vmstats);
5277         free_percpu(memcg->vmstats_percpu);
5278         kfree(memcg);
5279 }
5280
5281 static void mem_cgroup_free(struct mem_cgroup *memcg)
5282 {
5283         lru_gen_exit_memcg(memcg);
5284         memcg_wb_domain_exit(memcg);
5285         __mem_cgroup_free(memcg);
5286 }
5287
5288 static struct mem_cgroup *mem_cgroup_alloc(void)
5289 {
5290         struct mem_cgroup *memcg;
5291         int node;
5292         int __maybe_unused i;
5293         long error = -ENOMEM;
5294
5295         memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5296         if (!memcg)
5297                 return ERR_PTR(error);
5298
5299         memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5300                                  1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
5301         if (memcg->id.id < 0) {
5302                 error = memcg->id.id;
5303                 goto fail;
5304         }
5305
5306         memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats), GFP_KERNEL);
5307         if (!memcg->vmstats)
5308                 goto fail;
5309
5310         memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5311                                                  GFP_KERNEL_ACCOUNT);
5312         if (!memcg->vmstats_percpu)
5313                 goto fail;
5314
5315         for_each_node(node)
5316                 if (alloc_mem_cgroup_per_node_info(memcg, node))
5317                         goto fail;
5318
5319         if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5320                 goto fail;
5321
5322         INIT_WORK(&memcg->high_work, high_work_func);
5323         INIT_LIST_HEAD(&memcg->oom_notify);
5324         mutex_init(&memcg->thresholds_lock);
5325         spin_lock_init(&memcg->move_lock);
5326         vmpressure_init(&memcg->vmpressure);
5327         INIT_LIST_HEAD(&memcg->event_list);
5328         spin_lock_init(&memcg->event_list_lock);
5329         memcg->socket_pressure = jiffies;
5330 #ifdef CONFIG_MEMCG_KMEM
5331         memcg->kmemcg_id = -1;
5332         INIT_LIST_HEAD(&memcg->objcg_list);
5333 #endif
5334 #ifdef CONFIG_CGROUP_WRITEBACK
5335         INIT_LIST_HEAD(&memcg->cgwb_list);
5336         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5337                 memcg->cgwb_frn[i].done =
5338                         __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5339 #endif
5340 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5341         spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5342         INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5343         memcg->deferred_split_queue.split_queue_len = 0;
5344 #endif
5345         lru_gen_init_memcg(memcg);
5346         return memcg;
5347 fail:
5348         mem_cgroup_id_remove(memcg);
5349         __mem_cgroup_free(memcg);
5350         return ERR_PTR(error);
5351 }
5352
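/*
 * Allocate a new memcg for @parent_css. The root memcg gets standalone
 * page counters and is published as root_mem_cgroup; children inherit
 * swappiness and oom_kill_disable and chain their counters to the
 * parent's so that hierarchical limits apply.
 */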
5353 static struct cgroup_subsys_state * __ref
5354 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5355 {
5356         struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5357         struct mem_cgroup *memcg, *old_memcg;
5358
5359         old_memcg = set_active_memcg(parent);
5360         memcg = mem_cgroup_alloc();
5361         set_active_memcg(old_memcg);
5362         if (IS_ERR(memcg))
5363                 return ERR_CAST(memcg);
5364
5365         page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5366         WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5367 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
5368         memcg->zswap_max = PAGE_COUNTER_MAX;
5369 #endif
5370         page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5371         if (parent) {
5372                 WRITE_ONCE(memcg->swappiness, mem_cgroup_swappiness(parent));
5373                 WRITE_ONCE(memcg->oom_kill_disable, READ_ONCE(parent->oom_kill_disable));
5374
5375                 page_counter_init(&memcg->memory, &parent->memory);
5376                 page_counter_init(&memcg->swap, &parent->swap);
5377                 page_counter_init(&memcg->kmem, &parent->kmem);
5378                 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5379         } else {
5380                 init_memcg_events();
5381                 page_counter_init(&memcg->memory, NULL);
5382                 page_counter_init(&memcg->swap, NULL);
5383                 page_counter_init(&memcg->kmem, NULL);
5384                 page_counter_init(&memcg->tcpmem, NULL);
5385
5386                 root_mem_cgroup = memcg;
5387                 return &memcg->css;
5388         }
5389
5390         if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5391                 static_branch_inc(&memcg_sockets_enabled_key);
5392
5393 #if defined(CONFIG_MEMCG_KMEM)
5394         if (!cgroup_memory_nobpf)
5395                 static_branch_inc(&memcg_bpf_enabled_key);
5396 #endif
5397
5398         return &memcg->css;
5399 }
5400
5401 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5402 {
5403         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5404
5405         if (memcg_online_kmem(memcg))
5406                 goto remove_id;
5407
5408         /*
5409          * A memcg must be visible for expand_shrinker_info()
5410          * by the time the maps are allocated. So, we allocate maps
5411          * here, when for_each_mem_cgroup() can't skip it.
5412          */
5413         if (alloc_shrinker_info(memcg))
5414                 goto offline_kmem;
5415
5416         if (unlikely(mem_cgroup_is_root(memcg)))
5417                 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5418                                    FLUSH_TIME);
5419         lru_gen_online_memcg(memcg);
5420
5421         /* Online state pins memcg ID, memcg ID pins CSS */
5422         refcount_set(&memcg->id.ref, 1);
5423         css_get(css);
5424
5425         /*
5426          * Ensure mem_cgroup_from_id() works once we're fully online.
5427          *
5428          * We could do this earlier and require callers to filter with
5429          * css_tryget_online(). But right now there are no users that
5430          * need earlier access, and the workingset code relies on the
5431          * cgroup tree linkage (mem_cgroup_get_nr_swap_pages()). So
5432          * publish it here at the end of onlining. This matches the
5433          * regular ID destruction during offlining.
5434          */
5435         idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5436
5437         return 0;
5438 offline_kmem:
5439         memcg_offline_kmem(memcg);
5440 remove_id:
5441         mem_cgroup_id_remove(memcg);
5442         return -ENOMEM;
5443 }
5444
5445 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5446 {
5447         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5448         struct mem_cgroup_event *event, *tmp;
5449
5450         /*
5451          * Unregister events and notify userspace.
5452          * Notify userspace about cgroup removing only after rmdir of cgroup
5453          * directory to avoid race between userspace and kernelspace.
5454          */
5455         spin_lock_irq(&memcg->event_list_lock);
5456         list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5457                 list_del_init(&event->list);
5458                 schedule_work(&event->remove);
5459         }
5460         spin_unlock_irq(&memcg->event_list_lock);
5461
5462         page_counter_set_min(&memcg->memory, 0);
5463         page_counter_set_low(&memcg->memory, 0);
5464
5465         memcg_offline_kmem(memcg);
5466         reparent_shrinker_deferred(memcg);
5467         wb_memcg_offline(memcg);
5468         lru_gen_offline_memcg(memcg);
5469
5470         drain_all_stock(memcg);
5471
5472         mem_cgroup_id_put(memcg);
5473 }
5474
5475 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5476 {
5477         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5478
5479         invalidate_reclaim_iterators(memcg);
5480         lru_gen_release_memcg(memcg);
5481 }
5482
5483 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5484 {
5485         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5486         int __maybe_unused i;
5487
5488 #ifdef CONFIG_CGROUP_WRITEBACK
5489         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5490                 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5491 #endif
5492         if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5493                 static_branch_dec(&memcg_sockets_enabled_key);
5494
5495         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5496                 static_branch_dec(&memcg_sockets_enabled_key);
5497
5498 #if defined(CONFIG_MEMCG_KMEM)
5499         if (!cgroup_memory_nobpf)
5500                 static_branch_dec(&memcg_bpf_enabled_key);
5501 #endif
5502
5503         vmpressure_cleanup(&memcg->vmpressure);
5504         cancel_work_sync(&memcg->high_work);
5505         mem_cgroup_remove_from_trees(memcg);
5506         free_shrinker_info(memcg);
5507         mem_cgroup_free(memcg);
5508 }
5509
5510 /**
5511  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5512  * @css: the target css
5513  *
5514  * Reset the states of the mem_cgroup associated with @css.  This is
5515  * invoked when the userland requests disabling on the default hierarchy
5516  * but the memcg is pinned through dependency.  The memcg should stop
5517  * applying policies and should revert to the vanilla state as it may be
5518  * made visible again.
5519  *
5520  * The current implementation only resets the essential configurations.
5521  * This needs to be expanded to cover all the visible parts.
5522  */
5523 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5524 {
5525         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5526
5527         page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5528         page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5529         page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5530         page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5531         page_counter_set_min(&memcg->memory, 0);
5532         page_counter_set_low(&memcg->memory, 0);
5533         page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5534         WRITE_ONCE(memcg->soft_limit, PAGE_COUNTER_MAX);
5535         page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5536         memcg_wb_domain_size_changed(memcg);
5537 }
5538
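/*
 * rstat flush callback: fold @cpu's deltas accumulated since the last
 * flush into this memcg's counters and queue them as pending deltas on
 * the parent, covering memcg-wide state, events, and the per-node
 * lruvec statistics.
 */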
5539 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5540 {
5541         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5542         struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5543         struct memcg_vmstats_percpu *statc;
5544         long delta, delta_cpu, v;
5545         int i, nid;
5546
5547         statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5548
5549         for (i = 0; i < MEMCG_NR_STAT; i++) {
5550                 /*
5551                  * Collect the aggregated propagation counts of groups
5552                  * below us. We're in a per-cpu loop here and this is
5553                  * a global counter, so the first cycle will get them.
5554                  */
5555                 delta = memcg->vmstats->state_pending[i];
5556                 if (delta)
5557                         memcg->vmstats->state_pending[i] = 0;
5558
5559                 /* Add CPU changes on this level since the last flush */
5560                 delta_cpu = 0;
5561                 v = READ_ONCE(statc->state[i]);
5562                 if (v != statc->state_prev[i]) {
5563                         delta_cpu = v - statc->state_prev[i];
5564                         delta += delta_cpu;
5565                         statc->state_prev[i] = v;
5566                 }
5567
5568                 /* Aggregate counts on this level and propagate upwards */
5569                 if (delta_cpu)
5570                         memcg->vmstats->state_local[i] += delta_cpu;
5571
5572                 if (delta) {
5573                         memcg->vmstats->state[i] += delta;
5574                         if (parent)
5575                                 parent->vmstats->state_pending[i] += delta;
5576                 }
5577         }
5578
5579         for (i = 0; i < NR_MEMCG_EVENTS; i++) {
5580                 delta = memcg->vmstats->events_pending[i];
5581                 if (delta)
5582                         memcg->vmstats->events_pending[i] = 0;
5583
5584                 delta_cpu = 0;
5585                 v = READ_ONCE(statc->events[i]);
5586                 if (v != statc->events_prev[i]) {
5587                         delta_cpu = v - statc->events_prev[i];
5588                         delta += delta_cpu;
5589                         statc->events_prev[i] = v;
5590                 }
5591
5592                 if (delta_cpu)
5593                         memcg->vmstats->events_local[i] += delta_cpu;
5594
5595                 if (delta) {
5596                         memcg->vmstats->events[i] += delta;
5597                         if (parent)
5598                                 parent->vmstats->events_pending[i] += delta;
5599                 }
5600         }
5601
5602         for_each_node_state(nid, N_MEMORY) {
5603                 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5604                 struct mem_cgroup_per_node *ppn = NULL;
5605                 struct lruvec_stats_percpu *lstatc;
5606
5607                 if (parent)
5608                         ppn = parent->nodeinfo[nid];
5609
5610                 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5611
5612                 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5613                         delta = pn->lruvec_stats.state_pending[i];
5614                         if (delta)
5615                                 pn->lruvec_stats.state_pending[i] = 0;
5616
5617                         delta_cpu = 0;
5618                         v = READ_ONCE(lstatc->state[i]);
5619                         if (v != lstatc->state_prev[i]) {
5620                                 delta_cpu = v - lstatc->state_prev[i];
5621                                 delta += delta_cpu;
5622                                 lstatc->state_prev[i] = v;
5623                         }
5624
5625                         if (delta_cpu)
5626                                 pn->lruvec_stats.state_local[i] += delta_cpu;
5627
5628                         if (delta) {
5629                                 pn->lruvec_stats.state[i] += delta;
5630                                 if (ppn)
5631                                         ppn->lruvec_stats.state_pending[i] += delta;
5632                         }
5633                 }
5634         }
5635 }
5636
5637 #ifdef CONFIG_MMU
5638 /* Handlers for move charge at task migration. */
5639 static int mem_cgroup_do_precharge(unsigned long count)
5640 {
5641         int ret;
5642
5643         /* Try a single bulk charge without reclaim first, kswapd may wake */
5644         ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5645         if (!ret) {
5646                 mc.precharge += count;
5647                 return ret;
5648         }
5649
5650         /* Try charges one by one with reclaim, but do not retry */
5651         while (count--) {
5652                 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5653                 if (ret)
5654                         return ret;
5655                 mc.precharge++;
5656                 cond_resched();
5657         }
5658         return 0;
5659 }
5660
5661 union mc_target {
5662         struct page     *page;
5663         swp_entry_t     ent;
5664 };
5665
5666 enum mc_target_type {
5667         MC_TARGET_NONE = 0,
5668         MC_TARGET_PAGE,
5669         MC_TARGET_SWAP,
5670         MC_TARGET_DEVICE,
5671 };
5672
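/*
 * For a present pte, return the mapped page with an extra reference
 * taken, provided its type (anon vs. file) is selected in mc.flags;
 * otherwise return NULL.
 */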
5673 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5674                                                 unsigned long addr, pte_t ptent)
5675 {
5676         struct page *page = vm_normal_page(vma, addr, ptent);
5677
5678         if (!page)
5679                 return NULL;
5680         if (PageAnon(page)) {
5681                 if (!(mc.flags & MOVE_ANON))
5682                         return NULL;
5683         } else {
5684                 if (!(mc.flags & MOVE_FILE))
5685                         return NULL;
5686         }
5687         get_page(page);
5688
5689         return page;
5690 }
5691
5692 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5693 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5694                         pte_t ptent, swp_entry_t *entry)
5695 {
5696         struct page *page = NULL;
5697         swp_entry_t ent = pte_to_swp_entry(ptent);
5698
5699         if (!(mc.flags & MOVE_ANON))
5700                 return NULL;
5701
5702         /*
5703          * Handle device private pages that are not accessible by the CPU, but
5704          * stored as special swap entries in the page table.
5705          */
5706         if (is_device_private_entry(ent)) {
5707                 page = pfn_swap_entry_to_page(ent);
5708                 if (!get_page_unless_zero(page))
5709                         return NULL;
5710                 return page;
5711         }
5712
5713         if (non_swap_entry(ent))
5714                 return NULL;
5715
5716         /*
5717          * Because swap_cache_get_folio() updates some statistics counters,
5718          * we call find_get_page() with swapper_space directly.
5719          */
5720         page = find_get_page(swap_address_space(ent), swp_offset(ent));
5721         entry->val = ent.val;
5722
5723         return page;
5724 }
5725 #else
5726 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5727                         pte_t ptent, swp_entry_t *entry)
5728 {
5729         return NULL;
5730 }
5731 #endif
5732
5733 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5734                         unsigned long addr, pte_t ptent)
5735 {
5736         unsigned long index;
5737         struct folio *folio;
5738
5739         if (!vma->vm_file) /* anonymous vma */
5740                 return NULL;
5741         if (!(mc.flags & MOVE_FILE))
5742                 return NULL;
5743
5744         /* The folio is moved even if it's not RSS of this task (page-faulted). */
5745         /* shmem/tmpfs may report page out on swap: account for that too. */
5746         index = linear_page_index(vma, addr);
5747         folio = filemap_get_incore_folio(vma->vm_file->f_mapping, index);
5748         if (IS_ERR(folio))
5749                 return NULL;
5750         return folio_file_page(folio, index);
5751 }
5752
5753 /**
5754  * mem_cgroup_move_account - move account of the page
5755  * @page: the page
5756  * @compound: charge the page as compound or small page
5757  * @from: mem_cgroup which the page is moved from.
5758  * @to: mem_cgroup which the page is moved to. @from != @to.
5759  *
5760  * The page must be locked and not on the LRU.
5761  *
5762  * This function doesn't do "charge" to the new cgroup and doesn't do
5763  * "uncharge" from the old cgroup.
5764  */
5765 static int mem_cgroup_move_account(struct page *page,
5766                                    bool compound,
5767                                    struct mem_cgroup *from,
5768                                    struct mem_cgroup *to)
5769 {
5770         struct folio *folio = page_folio(page);
5771         struct lruvec *from_vec, *to_vec;
5772         struct pglist_data *pgdat;
5773         unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5774         int nid, ret;
5775
5776         VM_BUG_ON(from == to);
5777         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
5778         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5779         VM_BUG_ON(compound && !folio_test_large(folio));
5780
5781         ret = -EINVAL;
5782         if (folio_memcg(folio) != from)
5783                 goto out;
5784
5785         pgdat = folio_pgdat(folio);
5786         from_vec = mem_cgroup_lruvec(from, pgdat);
5787         to_vec = mem_cgroup_lruvec(to, pgdat);
5788
5789         folio_memcg_lock(folio);
5790
5791         if (folio_test_anon(folio)) {
5792                 if (folio_mapped(folio)) {
5793                         __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5794                         __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5795                         if (folio_test_pmd_mappable(folio)) {
5796                                 __mod_lruvec_state(from_vec, NR_ANON_THPS,
5797                                                    -nr_pages);
5798                                 __mod_lruvec_state(to_vec, NR_ANON_THPS,
5799                                                    nr_pages);
5800                         }
5801                 }
5802         } else {
5803                 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5804                 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5805
5806                 if (folio_test_swapbacked(folio)) {
5807                         __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5808                         __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5809                 }
5810
5811                 if (folio_mapped(folio)) {
5812                         __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5813                         __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5814                 }
5815
5816                 if (folio_test_dirty(folio)) {
5817                         struct address_space *mapping = folio_mapping(folio);
5818
5819                         if (mapping_can_writeback(mapping)) {
5820                                 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5821                                                    -nr_pages);
5822                                 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5823                                                    nr_pages);
5824                         }
5825                 }
5826         }
5827
5828 #ifdef CONFIG_SWAP
5829         if (folio_test_swapcache(folio)) {
5830                 __mod_lruvec_state(from_vec, NR_SWAPCACHE, -nr_pages);
5831                 __mod_lruvec_state(to_vec, NR_SWAPCACHE, nr_pages);
5832         }
5833 #endif
5834         if (folio_test_writeback(folio)) {
5835                 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5836                 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5837         }
5838
5839         /*
5840          * All state has been migrated, let's switch to the new memcg.
5841          *
5842          * It is safe to change page's memcg here because the page
5843          * is referenced, charged, isolated, and locked: we can't race
5844          * with (un)charging, migration, LRU putback, or anything else
5845          * that would rely on a stable page's memory cgroup.
5846          *
5847          * Note that folio_memcg_lock is a memcg lock, not a page lock,
5848          * to save space. As soon as we switch page's memory cgroup to a
5849          * new memcg that isn't locked, the above state can change
5850          * concurrently again. Make sure we're truly done with it.
5851          */
5852         smp_mb();
5853
5854         css_get(&to->css);
5855         css_put(&from->css);
5856
5857         folio->memcg_data = (unsigned long)to;
5858
5859         __folio_memcg_unlock(from);
5860
5861         ret = 0;
5862         nid = folio_nid(folio);
5863
5864         local_irq_disable();
5865         mem_cgroup_charge_statistics(to, nr_pages);
5866         memcg_check_events(to, nid);
5867         mem_cgroup_charge_statistics(from, -nr_pages);
5868         memcg_check_events(from, nid);
5869         local_irq_enable();
5870 out:
5871         return ret;
5872 }
5873
5874 /**
5875  * get_mctgt_type - get target type of moving charge
5876  * @vma: the vma the pte to be checked belongs
5877  * @addr: the address corresponding to the pte to be checked
5878  * @ptent: the pte to be checked
5879  * @target: the pointer the target page or swap ent will be stored(can be NULL)
5880  *
5881  * Context: Called with pte lock held.
5882  * Return:
5883  * * MC_TARGET_NONE - If the pte is not a target for move charge.
5884  * * MC_TARGET_PAGE - If the page corresponding to this pte is a target for
5885  *   move charge. If @target is not NULL, the page is stored in target->page
5886  *   with extra refcnt taken (Caller should release it).
5887  * * MC_TARGET_SWAP - If the swap entry corresponding to this pte is a
5888  *   target for charge migration.  If @target is not NULL, the entry is
5889  *   stored in target->ent.
5890  * * MC_TARGET_DEVICE - Like MC_TARGET_PAGE but page is device memory and
5891  *   thus not on the lru.  For now such page is charged like a regular page
5892  *   would be as it is just special memory taking the place of a regular page.
5893  *   See Documentation/vm/hmm.txt and include/linux/hmm.h
5894  */
5895 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5896                 unsigned long addr, pte_t ptent, union mc_target *target)
5897 {
5898         struct page *page = NULL;
5899         enum mc_target_type ret = MC_TARGET_NONE;
5900         swp_entry_t ent = { .val = 0 };
5901
5902         if (pte_present(ptent))
5903                 page = mc_handle_present_pte(vma, addr, ptent);
5904         else if (pte_none_mostly(ptent))
5905                 /*
5906                  * PTE markers should be treated as a none pte here, separated
5907                  * from other swap handling below.
5908                  */
5909                 page = mc_handle_file_pte(vma, addr, ptent);
5910         else if (is_swap_pte(ptent))
5911                 page = mc_handle_swap_pte(vma, ptent, &ent);
5912
5913         if (target && page) {
5914                 if (!trylock_page(page)) {
5915                         put_page(page);
5916                         return ret;
5917                 }
5918                 /*
5919                  * page_mapped() must be stable during the move. This
5920                  * pte is locked, so if it's present, the page cannot
5921                  * become unmapped. If it isn't, we have only partial
5922                  * control over the mapped state: the page lock will
5923                  * prevent new faults against pagecache and swapcache,
5924                  * so an unmapped page cannot become mapped. However,
5925                  * if the page is already mapped elsewhere, it can
5926                  * unmap, and there is nothing we can do about it.
5927                  * Alas, skip moving the page in this case.
5928                  */
5929                 if (!pte_present(ptent) && page_mapped(page)) {
5930                         unlock_page(page);
5931                         put_page(page);
5932                         return ret;
5933                 }
5934         }
5935
5936         if (!page && !ent.val)
5937                 return ret;
5938         if (page) {
5939                 /*
5940                  * Only do a loose check here, without serialization.
5941                  * mem_cgroup_move_account() checks whether the page is
5942                  * valid under LRU exclusion.
5943                  */
5944                 if (page_memcg(page) == mc.from) {
5945                         ret = MC_TARGET_PAGE;
5946                         if (is_device_private_page(page) ||
5947                             is_device_coherent_page(page))
5948                                 ret = MC_TARGET_DEVICE;
5949                         if (target)
5950                                 target->page = page;
5951                 }
5952                 if (!ret || !target) {
5953                         if (target)
5954                                 unlock_page(page);
5955                         put_page(page);
5956                 }
5957         }
5958         /*
5959          * There is a swap entry and the page doesn't exist or isn't charged.
5960          * But we cannot move a tail page of a THP.
5961          */
5962         if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5963             mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5964                 ret = MC_TARGET_SWAP;
5965                 if (target)
5966                         target->ent = ent;
5967         }
5968         return ret;
5969 }
5970
5971 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5972 /*
5973  * We don't consider PMD mapped swapping or file mapped pages because THP does
5974  * not support them for now.
5975  * Caller should make sure that pmd_trans_huge(pmd) is true.
5976  */
5977 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5978                 unsigned long addr, pmd_t pmd, union mc_target *target)
5979 {
5980         struct page *page = NULL;
5981         enum mc_target_type ret = MC_TARGET_NONE;
5982
5983         if (unlikely(is_swap_pmd(pmd))) {
5984                 VM_BUG_ON(thp_migration_supported() &&
5985                                   !is_pmd_migration_entry(pmd));
5986                 return ret;
5987         }
5988         page = pmd_page(pmd);
5989         VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5990         if (!(mc.flags & MOVE_ANON))
5991                 return ret;
5992         if (page_memcg(page) == mc.from) {
5993                 ret = MC_TARGET_PAGE;
5994                 if (target) {
5995                         get_page(page);
5996                         if (!trylock_page(page)) {
5997                                 put_page(page);
5998                                 return MC_TARGET_NONE;
5999                         }
6000                         target->page = page;
6001                 }
6002         }
6003         return ret;
6004 }
6005 #else
6006 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
6007                 unsigned long addr, pmd_t pmd, union mc_target *target)
6008 {
6009         return MC_TARGET_NONE;
6010 }
6011 #endif
6012
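/*
 * First pass of charge moving: walk the page tables and only count how many
 * charges would have to be moved, so that they can be precharged to the
 * destination memcg in one go before the actual move.
 */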
6013 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
6014                                         unsigned long addr, unsigned long end,
6015                                         struct mm_walk *walk)
6016 {
6017         struct vm_area_struct *vma = walk->vma;
6018         pte_t *pte;
6019         spinlock_t *ptl;
6020
6021         ptl = pmd_trans_huge_lock(pmd, vma);
6022         if (ptl) {
6023                 /*
6024                  * Note there cannot be MC_TARGET_DEVICE for now as we do not
6025                  * support transparent huge pages with MEMORY_DEVICE_PRIVATE, but
6026                  * this might change.
6027                  */
6028                 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
6029                         mc.precharge += HPAGE_PMD_NR;
6030                 spin_unlock(ptl);
6031                 return 0;
6032         }
6033
6034         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6035         if (!pte)
6036                 return 0;
6037         for (; addr != end; pte++, addr += PAGE_SIZE)
6038                 if (get_mctgt_type(vma, addr, ptep_get(pte), NULL))
6039                         mc.precharge++; /* increment precharge temporarily */
6040         pte_unmap_unlock(pte - 1, ptl);
6041         cond_resched();
6042
6043         return 0;
6044 }
6045
6046 static const struct mm_walk_ops precharge_walk_ops = {
6047         .pmd_entry      = mem_cgroup_count_precharge_pte_range,
6048         .walk_lock      = PGWALK_RDLOCK,
6049 };
6050
6051 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
6052 {
6053         unsigned long precharge;
6054
6055         mmap_read_lock(mm);
6056         walk_page_range(mm, 0, ULONG_MAX, &precharge_walk_ops, NULL);
6057         mmap_read_unlock(mm);
6058
6059         precharge = mc.precharge;
6060         mc.precharge = 0;
6061
6062         return precharge;
6063 }
6064
6065 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
6066 {
6067         unsigned long precharge = mem_cgroup_count_precharge(mm);
6068
6069         VM_BUG_ON(mc.moving_task);
6070         mc.moving_task = current;
6071         return mem_cgroup_do_precharge(precharge);
6072 }
6073
6074 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
6075 static void __mem_cgroup_clear_mc(void)
6076 {
6077         struct mem_cgroup *from = mc.from;
6078         struct mem_cgroup *to = mc.to;
6079
6080         /* we must uncharge all the leftover precharges from mc.to */
6081         if (mc.precharge) {
6082                 cancel_charge(mc.to, mc.precharge);
6083                 mc.precharge = 0;
6084         }
6085         /*
6086          * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
6087          * we must uncharge here.
6088          */
6089         if (mc.moved_charge) {
6090                 cancel_charge(mc.from, mc.moved_charge);
6091                 mc.moved_charge = 0;
6092         }
6093         /* we must fixup refcnts and charges */
6094         if (mc.moved_swap) {
6095                 /* uncharge swap account from the old cgroup */
6096                 if (!mem_cgroup_is_root(mc.from))
6097                         page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
6098
6099                 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
6100
6101                 /*
6102                  * we charged both to->memory and to->memsw, so we
6103                  * should uncharge to->memory.
6104                  */
6105                 if (!mem_cgroup_is_root(mc.to))
6106                         page_counter_uncharge(&mc.to->memory, mc.moved_swap);
6107
6108                 mc.moved_swap = 0;
6109         }
6110         memcg_oom_recover(from);
6111         memcg_oom_recover(to);
6112         wake_up_all(&mc.waitq);
6113 }
6114
6115 static void mem_cgroup_clear_mc(void)
6116 {
6117         struct mm_struct *mm = mc.mm;
6118
6119         /*
6120          * we must clear moving_task before waking up waiters at the end of
6121          * task migration.
6122          */
6123         mc.moving_task = NULL;
6124         __mem_cgroup_clear_mc();
6125         spin_lock(&mc.lock);
6126         mc.from = NULL;
6127         mc.to = NULL;
6128         mc.mm = NULL;
6129         spin_unlock(&mc.lock);
6130
6131         mmput(mm);
6132 }
6133
6134 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6135 {
6136         struct cgroup_subsys_state *css;
6137         struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
6138         struct mem_cgroup *from;
6139         struct task_struct *leader, *p;
6140         struct mm_struct *mm;
6141         unsigned long move_flags;
6142         int ret = 0;
6143
6144         /* charge immigration isn't supported on the default hierarchy */
6145         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
6146                 return 0;
6147
6148         /*
6149          * Multi-process migrations only happen on the default hierarchy
6150          * where charge immigration is not used.  Perform charge
6151          * immigration if @tset contains a leader and whine if there are
6152          * multiple.
6153          */
6154         p = NULL;
6155         cgroup_taskset_for_each_leader(leader, css, tset) {
6156                 WARN_ON_ONCE(p);
6157                 p = leader;
6158                 memcg = mem_cgroup_from_css(css);
6159         }
6160         if (!p)
6161                 return 0;
6162
6163         /*
6164          * We are now committed to this value, whatever it is. Changes to this
6165          * tunable will only affect upcoming migrations, not the current one.
6166          * So we read it once here and use the saved value for the whole move.
6167          */
6168         move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
6169         if (!move_flags)
6170                 return 0;
6171
6172         from = mem_cgroup_from_task(p);
6173
6174         VM_BUG_ON(from == memcg);
6175
6176         mm = get_task_mm(p);
6177         if (!mm)
6178                 return 0;
6179         /* We move charges only when we move the owner of the mm */
6180         if (mm->owner == p) {
6181                 VM_BUG_ON(mc.from);
6182                 VM_BUG_ON(mc.to);
6183                 VM_BUG_ON(mc.precharge);
6184                 VM_BUG_ON(mc.moved_charge);
6185                 VM_BUG_ON(mc.moved_swap);
6186
6187                 spin_lock(&mc.lock);
6188                 mc.mm = mm;
6189                 mc.from = from;
6190                 mc.to = memcg;
6191                 mc.flags = move_flags;
6192                 spin_unlock(&mc.lock);
6193                 /* We set mc.moving_task later */
6194
6195                 ret = mem_cgroup_precharge_mc(mm);
6196                 if (ret)
6197                         mem_cgroup_clear_mc();
6198         } else {
6199                 mmput(mm);
6200         }
6201         return ret;
6202 }
6203
6204 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6205 {
6206         if (mc.to)
6207                 mem_cgroup_clear_mc();
6208 }
6209
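/*
 * Second pass of charge moving: walk the page tables again and actually move
 * the charges found, consuming the precharges taken in can_attach().
 */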
6210 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6211                                 unsigned long addr, unsigned long end,
6212                                 struct mm_walk *walk)
6213 {
6214         int ret = 0;
6215         struct vm_area_struct *vma = walk->vma;
6216         pte_t *pte;
6217         spinlock_t *ptl;
6218         enum mc_target_type target_type;
6219         union mc_target target;
6220         struct page *page;
6221
6222         ptl = pmd_trans_huge_lock(pmd, vma);
6223         if (ptl) {
6224                 if (mc.precharge < HPAGE_PMD_NR) {
6225                         spin_unlock(ptl);
6226                         return 0;
6227                 }
6228                 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6229                 if (target_type == MC_TARGET_PAGE) {
6230                         page = target.page;
6231                         if (isolate_lru_page(page)) {
6232                                 if (!mem_cgroup_move_account(page, true,
6233                                                              mc.from, mc.to)) {
6234                                         mc.precharge -= HPAGE_PMD_NR;
6235                                         mc.moved_charge += HPAGE_PMD_NR;
6236                                 }
6237                                 putback_lru_page(page);
6238                         }
6239                         unlock_page(page);
6240                         put_page(page);
6241                 } else if (target_type == MC_TARGET_DEVICE) {
6242                         page = target.page;
6243                         if (!mem_cgroup_move_account(page, true,
6244                                                      mc.from, mc.to)) {
6245                                 mc.precharge -= HPAGE_PMD_NR;
6246                                 mc.moved_charge += HPAGE_PMD_NR;
6247                         }
6248                         unlock_page(page);
6249                         put_page(page);
6250                 }
6251                 spin_unlock(ptl);
6252                 return 0;
6253         }
6254
6255 retry:
6256         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6257         if (!pte)
6258                 return 0;
6259         for (; addr != end; addr += PAGE_SIZE) {
6260                 pte_t ptent = ptep_get(pte++);
6261                 bool device = false;
6262                 swp_entry_t ent;
6263
6264                 if (!mc.precharge)
6265                         break;
6266
6267                 switch (get_mctgt_type(vma, addr, ptent, &target)) {
6268                 case MC_TARGET_DEVICE:
6269                         device = true;
6270                         fallthrough;
6271                 case MC_TARGET_PAGE:
6272                         page = target.page;
6273                         /*
6274                          * We can have a part of a split pmd here. Moving it
6275                          * could be done, but it would be too convoluted, so simply
6276                          * ignore such a partial THP and keep it in the original
6277                          * memcg. There should be somebody mapping the head.
6278                          */
6279                         if (PageTransCompound(page))
6280                                 goto put;
6281                         if (!device && !isolate_lru_page(page))
6282                                 goto put;
6283                         if (!mem_cgroup_move_account(page, false,
6284                                                 mc.from, mc.to)) {
6285                                 mc.precharge--;
6286                                 /* we uncharge from mc.from later. */
6287                                 mc.moved_charge++;
6288                         }
6289                         if (!device)
6290                                 putback_lru_page(page);
6291 put:                    /* get_mctgt_type() gets & locks the page */
6292                         unlock_page(page);
6293                         put_page(page);
6294                         break;
6295                 case MC_TARGET_SWAP:
6296                         ent = target.ent;
6297                         if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6298                                 mc.precharge--;
6299                                 mem_cgroup_id_get_many(mc.to, 1);
6300                                 /* we fixup other refcnts and charges later. */
6301                                 mc.moved_swap++;
6302                         }
6303                         break;
6304                 default:
6305                         break;
6306                 }
6307         }
6308         pte_unmap_unlock(pte - 1, ptl);
6309         cond_resched();
6310
6311         if (addr != end) {
6312                 /*
6313                  * We have consumed all precharges we got in can_attach().
6314                  * We try to charge one page at a time from here on, but
6315                  * abort the walk as soon as a single additional charge to
6316                  * mc.to fails.
6317                  */
6318                 ret = mem_cgroup_do_precharge(1);
6319                 if (!ret)
6320                         goto retry;
6321         }
6322
6323         return ret;
6324 }
6325
6326 static const struct mm_walk_ops charge_walk_ops = {
6327         .pmd_entry      = mem_cgroup_move_charge_pte_range,
6328         .walk_lock      = PGWALK_RDLOCK,
6329 };
6330
6331 static void mem_cgroup_move_charge(void)
6332 {
6333         lru_add_drain_all();
6334         /*
6335          * Signal folio_memcg_lock() to take the memcg's move_lock
6336          * while we're moving its pages to another memcg. Then wait
6337          * for already started RCU-only updates to finish.
6338          */
6339         atomic_inc(&mc.from->moving_account);
6340         synchronize_rcu();
6341 retry:
6342         if (unlikely(!mmap_read_trylock(mc.mm))) {
6343                 /*
6344                  * Someone who is holding the mmap_lock might be waiting on
6345                  * our waitq. So we cancel all extra charges, wake up all waiters,
6346                  * and retry. Because we cancel precharges, we might not be able
6347                  * to move enough charges, but moving charge is a best-effort
6348                  * feature anyway, so it wouldn't be a big problem.
6349                  */
6350                 __mem_cgroup_clear_mc();
6351                 cond_resched();
6352                 goto retry;
6353         }
6354         /*
6355          * Once we have consumed all precharges and fail to do any
6356          * additional charging, the page walk just aborts.
6357          */
6358         walk_page_range(mc.mm, 0, ULONG_MAX, &charge_walk_ops, NULL);
6359         mmap_read_unlock(mc.mm);
6360         atomic_dec(&mc.from->moving_account);
6361 }
6362
6363 static void mem_cgroup_move_task(void)
6364 {
6365         if (mc.to) {
6366                 mem_cgroup_move_charge();
6367                 mem_cgroup_clear_mc();
6368         }
6369 }
6370 #else   /* !CONFIG_MMU */
6371 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6372 {
6373         return 0;
6374 }
6375 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6376 {
6377 }
6378 static void mem_cgroup_move_task(void)
6379 {
6380 }
6381 #endif
6382
6383 #ifdef CONFIG_LRU_GEN
6384 static void mem_cgroup_attach(struct cgroup_taskset *tset)
6385 {
6386         struct task_struct *task;
6387         struct cgroup_subsys_state *css;
6388
6389         /* find the first leader if there is any */
6390         cgroup_taskset_for_each_leader(task, css, tset)
6391                 break;
6392
6393         if (!task)
6394                 return;
6395
6396         task_lock(task);
6397         if (task->mm && READ_ONCE(task->mm->owner) == task)
6398                 lru_gen_migrate_mm(task->mm);
6399         task_unlock(task);
6400 }
6401 #else
6402 static void mem_cgroup_attach(struct cgroup_taskset *tset)
6403 {
6404 }
6405 #endif /* CONFIG_LRU_GEN */
6406
6407 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6408 {
6409         if (value == PAGE_COUNTER_MAX)
6410                 seq_puts(m, "max\n");
6411         else
6412                 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6413
6414         return 0;
6415 }
6416
6417 static u64 memory_current_read(struct cgroup_subsys_state *css,
6418                                struct cftype *cft)
6419 {
6420         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6421
6422         return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6423 }
6424
6425 static u64 memory_peak_read(struct cgroup_subsys_state *css,
6426                             struct cftype *cft)
6427 {
6428         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6429
6430         return (u64)memcg->memory.watermark * PAGE_SIZE;
6431 }
6432
6433 static int memory_min_show(struct seq_file *m, void *v)
6434 {
6435         return seq_puts_memcg_tunable(m,
6436                 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6437 }
6438
6439 static ssize_t memory_min_write(struct kernfs_open_file *of,
6440                                 char *buf, size_t nbytes, loff_t off)
6441 {
6442         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6443         unsigned long min;
6444         int err;
6445
6446         buf = strstrip(buf);
6447         err = page_counter_memparse(buf, "max", &min);
6448         if (err)
6449                 return err;
6450
6451         page_counter_set_min(&memcg->memory, min);
6452
6453         return nbytes;
6454 }
6455
6456 static int memory_low_show(struct seq_file *m, void *v)
6457 {
6458         return seq_puts_memcg_tunable(m,
6459                 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6460 }
6461
6462 static ssize_t memory_low_write(struct kernfs_open_file *of,
6463                                 char *buf, size_t nbytes, loff_t off)
6464 {
6465         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6466         unsigned long low;
6467         int err;
6468
6469         buf = strstrip(buf);
6470         err = page_counter_memparse(buf, "max", &low);
6471         if (err)
6472                 return err;
6473
6474         page_counter_set_low(&memcg->memory, low);
6475
6476         return nbytes;
6477 }
6478
6479 static int memory_high_show(struct seq_file *m, void *v)
6480 {
6481         return seq_puts_memcg_tunable(m,
6482                 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6483 }
6484
6485 static ssize_t memory_high_write(struct kernfs_open_file *of,
6486                                  char *buf, size_t nbytes, loff_t off)
6487 {
6488         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6489         unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6490         bool drained = false;
6491         unsigned long high;
6492         int err;
6493
6494         buf = strstrip(buf);
6495         err = page_counter_memparse(buf, "max", &high);
6496         if (err)
6497                 return err;
6498
6499         page_counter_set_high(&memcg->memory, high);
6500
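        /*
         * The new high limit is in place; try to reclaim the excess above it.
         * Drain the percpu charge caches once before retrying reclaim, and
         * give up after MAX_RECLAIM_RETRIES fruitless attempts or on a
         * pending signal.
         */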
6501         for (;;) {
6502                 unsigned long nr_pages = page_counter_read(&memcg->memory);
6503                 unsigned long reclaimed;
6504
6505                 if (nr_pages <= high)
6506                         break;
6507
6508                 if (signal_pending(current))
6509                         break;
6510
6511                 if (!drained) {
6512                         drain_all_stock(memcg);
6513                         drained = true;
6514                         continue;
6515                 }
6516
6517                 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6518                                         GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP);
6519
6520                 if (!reclaimed && !nr_retries--)
6521                         break;
6522         }
6523
6524         memcg_wb_domain_size_changed(memcg);
6525         return nbytes;
6526 }
6527
6528 static int memory_max_show(struct seq_file *m, void *v)
6529 {
6530         return seq_puts_memcg_tunable(m,
6531                 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6532 }
6533
6534 static ssize_t memory_max_write(struct kernfs_open_file *of,
6535                                 char *buf, size_t nbytes, loff_t off)
6536 {
6537         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6538         unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6539         bool drained = false;
6540         unsigned long max;
6541         int err;
6542
6543         buf = strstrip(buf);
6544         err = page_counter_memparse(buf, "max", &max);
6545         if (err)
6546                 return err;
6547
6548         xchg(&memcg->memory.max, max);
6549
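        /*
         * memory.max is a hard limit: reclaim down to the new value, draining
         * the percpu charge caches once. If reclaim still fails after
         * MAX_RECLAIM_RETRIES attempts, fall back to the memcg OOM killer.
         */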
6550         for (;;) {
6551                 unsigned long nr_pages = page_counter_read(&memcg->memory);
6552
6553                 if (nr_pages <= max)
6554                         break;
6555
6556                 if (signal_pending(current))
6557                         break;
6558
6559                 if (!drained) {
6560                         drain_all_stock(memcg);
6561                         drained = true;
6562                         continue;
6563                 }
6564
6565                 if (nr_reclaims) {
6566                         if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6567                                         GFP_KERNEL, MEMCG_RECLAIM_MAY_SWAP))
6568                                 nr_reclaims--;
6569                         continue;
6570                 }
6571
6572                 memcg_memory_event(memcg, MEMCG_OOM);
6573                 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6574                         break;
6575         }
6576
6577         memcg_wb_domain_size_changed(memcg);
6578         return nbytes;
6579 }
6580
6581 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6582 {
6583         seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6584         seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6585         seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6586         seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6587         seq_printf(m, "oom_kill %lu\n",
6588                    atomic_long_read(&events[MEMCG_OOM_KILL]));
6589         seq_printf(m, "oom_group_kill %lu\n",
6590                    atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6591 }
6592
6593 static int memory_events_show(struct seq_file *m, void *v)
6594 {
6595         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6596
6597         __memory_events_show(m, memcg->memory_events);
6598         return 0;
6599 }
6600
6601 static int memory_events_local_show(struct seq_file *m, void *v)
6602 {
6603         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6604
6605         __memory_events_show(m, memcg->memory_events_local);
6606         return 0;
6607 }
6608
6609 static int memory_stat_show(struct seq_file *m, void *v)
6610 {
6611         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6612         char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
6613         struct seq_buf s;
6614
6615         if (!buf)
6616                 return -ENOMEM;
6617         seq_buf_init(&s, buf, PAGE_SIZE);
6618         memory_stat_format(memcg, &s);
6619         seq_puts(m, buf);
6620         kfree(buf);
6621         return 0;
6622 }
6623
6624 #ifdef CONFIG_NUMA
6625 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6626                                                      int item)
6627 {
6628         return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
6629 }
6630
6631 static int memory_numa_stat_show(struct seq_file *m, void *v)
6632 {
6633         int i;
6634         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6635
6636         mem_cgroup_flush_stats();
6637
6638         for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6639                 int nid;
6640
6641                 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6642                         continue;
6643
6644                 seq_printf(m, "%s", memory_stats[i].name);
6645                 for_each_node_state(nid, N_MEMORY) {
6646                         u64 size;
6647                         struct lruvec *lruvec;
6648
6649                         lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6650                         size = lruvec_page_state_output(lruvec,
6651                                                         memory_stats[i].idx);
6652                         seq_printf(m, " N%d=%llu", nid, size);
6653                 }
6654                 seq_putc(m, '\n');
6655         }
6656
6657         return 0;
6658 }
6659 #endif
6660
6661 static int memory_oom_group_show(struct seq_file *m, void *v)
6662 {
6663         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6664
6665         seq_printf(m, "%d\n", READ_ONCE(memcg->oom_group));
6666
6667         return 0;
6668 }
6669
6670 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6671                                       char *buf, size_t nbytes, loff_t off)
6672 {
6673         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6674         int ret, oom_group;
6675
6676         buf = strstrip(buf);
6677         if (!buf)
6678                 return -EINVAL;
6679
6680         ret = kstrtoint(buf, 0, &oom_group);
6681         if (ret)
6682                 return ret;
6683
6684         if (oom_group != 0 && oom_group != 1)
6685                 return -EINVAL;
6686
6687         WRITE_ONCE(memcg->oom_group, oom_group);
6688
6689         return nbytes;
6690 }
6691
6692 static ssize_t memory_reclaim(struct kernfs_open_file *of, char *buf,
6693                               size_t nbytes, loff_t off)
6694 {
6695         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6696         unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6697         unsigned long nr_to_reclaim, nr_reclaimed = 0;
6698         unsigned int reclaim_options;
6699         int err;
6700
6701         buf = strstrip(buf);
6702         err = page_counter_memparse(buf, "", &nr_to_reclaim);
6703         if (err)
6704                 return err;
6705
6706         reclaim_options = MEMCG_RECLAIM_MAY_SWAP | MEMCG_RECLAIM_PROACTIVE;
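        /*
         * Proactive reclaim: call into reclaim in batches of at most
         * SWAP_CLUSTER_MAX pages until the requested amount has been
         * reclaimed, the retries are exhausted (-EAGAIN), or a signal
         * is pending (-EINTR).
         */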
6707         while (nr_reclaimed < nr_to_reclaim) {
6708                 unsigned long reclaimed;
6709
6710                 if (signal_pending(current))
6711                         return -EINTR;
6712
6713                 /*
6714                  * This is the final attempt, drain percpu lru caches in the
6715                  * hope of introducing more evictable pages for
6716                  * try_to_free_mem_cgroup_pages().
6717                  */
6718                 if (!nr_retries)
6719                         lru_add_drain_all();
6720
6721                 reclaimed = try_to_free_mem_cgroup_pages(memcg,
6722                                         min(nr_to_reclaim - nr_reclaimed, SWAP_CLUSTER_MAX),
6723                                         GFP_KERNEL, reclaim_options);
6724
6725                 if (!reclaimed && !nr_retries--)
6726                         return -EAGAIN;
6727
6728                 nr_reclaimed += reclaimed;
6729         }
6730
6731         return nbytes;
6732 }
6733
6734 static struct cftype memory_files[] = {
6735         {
6736                 .name = "current",
6737                 .flags = CFTYPE_NOT_ON_ROOT,
6738                 .read_u64 = memory_current_read,
6739         },
6740         {
6741                 .name = "peak",
6742                 .flags = CFTYPE_NOT_ON_ROOT,
6743                 .read_u64 = memory_peak_read,
6744         },
6745         {
6746                 .name = "min",
6747                 .flags = CFTYPE_NOT_ON_ROOT,
6748                 .seq_show = memory_min_show,
6749                 .write = memory_min_write,
6750         },
6751         {
6752                 .name = "low",
6753                 .flags = CFTYPE_NOT_ON_ROOT,
6754                 .seq_show = memory_low_show,
6755                 .write = memory_low_write,
6756         },
6757         {
6758                 .name = "high",
6759                 .flags = CFTYPE_NOT_ON_ROOT,
6760                 .seq_show = memory_high_show,
6761                 .write = memory_high_write,
6762         },
6763         {
6764                 .name = "max",
6765                 .flags = CFTYPE_NOT_ON_ROOT,
6766                 .seq_show = memory_max_show,
6767                 .write = memory_max_write,
6768         },
6769         {
6770                 .name = "events",
6771                 .flags = CFTYPE_NOT_ON_ROOT,
6772                 .file_offset = offsetof(struct mem_cgroup, events_file),
6773                 .seq_show = memory_events_show,
6774         },
6775         {
6776                 .name = "events.local",
6777                 .flags = CFTYPE_NOT_ON_ROOT,
6778                 .file_offset = offsetof(struct mem_cgroup, events_local_file),
6779                 .seq_show = memory_events_local_show,
6780         },
6781         {
6782                 .name = "stat",
6783                 .seq_show = memory_stat_show,
6784         },
6785 #ifdef CONFIG_NUMA
6786         {
6787                 .name = "numa_stat",
6788                 .seq_show = memory_numa_stat_show,
6789         },
6790 #endif
6791         {
6792                 .name = "oom.group",
6793                 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6794                 .seq_show = memory_oom_group_show,
6795                 .write = memory_oom_group_write,
6796         },
6797         {
6798                 .name = "reclaim",
6799                 .flags = CFTYPE_NS_DELEGATABLE,
6800                 .write = memory_reclaim,
6801         },
6802         { }     /* terminate */
6803 };
6804
6805 struct cgroup_subsys memory_cgrp_subsys = {
6806         .css_alloc = mem_cgroup_css_alloc,
6807         .css_online = mem_cgroup_css_online,
6808         .css_offline = mem_cgroup_css_offline,
6809         .css_released = mem_cgroup_css_released,
6810         .css_free = mem_cgroup_css_free,
6811         .css_reset = mem_cgroup_css_reset,
6812         .css_rstat_flush = mem_cgroup_css_rstat_flush,
6813         .can_attach = mem_cgroup_can_attach,
6814         .attach = mem_cgroup_attach,
6815         .cancel_attach = mem_cgroup_cancel_attach,
6816         .post_attach = mem_cgroup_move_task,
6817         .dfl_cftypes = memory_files,
6818         .legacy_cftypes = mem_cgroup_legacy_files,
6819         .early_init = 0,
6820 };
6821
6822 /*
6823  * This function calculates an individual cgroup's effective
6824  * protection which is derived from its own memory.min/low, its
6825  * parent's and siblings' settings, as well as the actual memory
6826  * distribution in the tree.
6827  *
6828  * The following rules apply to the effective protection values:
6829  *
6830  * 1. At the first level of reclaim, effective protection is equal to
6831  *    the declared protection in memory.min and memory.low.
6832  *
6833  * 2. To enable safe delegation of the protection configuration, at
6834  *    subsequent levels the effective protection is capped to the
6835  *    parent's effective protection.
6836  *
6837  * 3. To make complex and dynamic subtrees easier to configure, the
6838  *    user is allowed to overcommit the declared protection at a given
6839  *    level. If that is the case, the parent's effective protection is
6840  *    distributed to the children in proportion to how much protection
6841  *    they have declared and how much of it they are utilizing.
6842  *
6843  *    This makes distribution proportional, but also work-conserving:
6844  *    if one cgroup claims much more protection than it uses memory,
6845  *    the unused remainder is available to its siblings.
6846  *
6847  * 4. Conversely, when the declared protection is undercommitted at a
6848  *    given level, the distribution of the larger parental protection
6849  *    budget is NOT proportional. A cgroup's protection from a sibling
6850  *    is capped to its own memory.min/low setting.
6851  *
6852  * 5. However, to allow protecting recursive subtrees from each other
6853  *    without having to declare each individual cgroup's fixed share
6854  *    of the ancestor's claim to protection, any unutilized -
6855  *    "floating" - protection from up the tree is distributed in
6856  *    proportion to each cgroup's *usage*. This makes the protection
6857  *    neutral wrt sibling cgroups and lets them compete freely over
6858  *    the shared parental protection budget, but it protects the
6859  *    subtree as a whole from neighboring subtrees.
6860  *
6861  * Note that 4. and 5. are not in conflict: 4. is about protecting
6862  * against immediate siblings whereas 5. is about protecting against
6863  * neighboring subtrees.
6864  */
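/*
 * As an illustration of rule 3 above (the numbers are made up): if the
 * parent's effective protection is 8G while its two children each claim
 * and use 6G, the 12G of claimed protection is scaled down proportionally
 * and each child ends up with 6G * 8G / 12G = 4G of effective protection.
 */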
6865 static unsigned long effective_protection(unsigned long usage,
6866                                           unsigned long parent_usage,
6867                                           unsigned long setting,
6868                                           unsigned long parent_effective,
6869                                           unsigned long siblings_protected)
6870 {
6871         unsigned long protected;
6872         unsigned long ep;
6873
6874         protected = min(usage, setting);
6875         /*
6876          * If all cgroups at this level combined claim and use more
6877          * protection than what the parent affords them, distribute
6878          * shares in proportion to utilization.
6879          *
6880          * We are using actual utilization rather than the statically
6881          * claimed protection in order to be work-conserving: claimed
6882          * but unused protection is available to siblings that would
6883          * otherwise get a smaller chunk than what they claimed.
6884          */
6885         if (siblings_protected > parent_effective)
6886                 return protected * parent_effective / siblings_protected;
6887
6888         /*
6889          * Ok, utilized protection of all children is within what the
6890          * parent affords them, so we know whatever this child claims
6891          * and utilizes is effectively protected.
6892          *
6893          * If there is unprotected usage beyond this value, reclaim
6894          * will apply pressure in proportion to that amount.
6895          *
6896          * If there is unutilized protection, the cgroup will be fully
6897          * shielded from reclaim, but we do return a smaller value for
6898          * protection than what the group could enjoy in theory. This
6899          * is okay. With the overcommit distribution above, effective
6900          * protection is always dependent on how memory is actually
6901          * consumed among the siblings anyway.
6902          */
6903         ep = protected;
6904
6905         /*
6906          * If the children aren't claiming (all of) the protection
6907          * afforded to them by the parent, distribute the remainder in
6908          * proportion to the (unprotected) memory of each cgroup. That
6909          * way, cgroups that aren't explicitly prioritized wrt each
6910          * other compete freely over the allowance, but they are
6911          * collectively protected from neighboring trees.
6912          *
6913          * We're using unprotected memory for the weight so that if
6914          * some cgroups DO claim explicit protection, we don't protect
6915          * the same bytes twice.
6916          *
6917          * Check both usage and parent_usage against the respective
6918          * protected values. One should imply the other, but they
6919          * aren't read atomically - make sure the division is sane.
6920          */
6921         if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6922                 return ep;
6923         if (parent_effective > siblings_protected &&
6924             parent_usage > siblings_protected &&
6925             usage > protected) {
6926                 unsigned long unclaimed;
6927
6928                 unclaimed = parent_effective - siblings_protected;
6929                 unclaimed *= usage - protected;
6930                 unclaimed /= parent_usage - siblings_protected;
6931
6932                 ep += unclaimed;
6933         }
6934
6935         return ep;
6936 }
6937
6938 /**
6939  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
6940  * @root: the top ancestor of the sub-tree being checked
6941  * @memcg: the memory cgroup to check
6942  *
6943  * WARNING: This function is not stateless! It can only be used as part
6944  *          of a top-down tree iteration, not for isolated queries.
6945  */
6946 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6947                                      struct mem_cgroup *memcg)
6948 {
6949         unsigned long usage, parent_usage;
6950         struct mem_cgroup *parent;
6951
6952         if (mem_cgroup_disabled())
6953                 return;
6954
6955         if (!root)
6956                 root = root_mem_cgroup;
6957
6958         /*
6959          * Effective values of the reclaim targets are ignored so they
6960          * can be stale. Have a look at mem_cgroup_protection for more
6961          * details.
6962          * TODO: calculation should be more robust so that we do not need
6963          * that special casing.
6964          */
6965         if (memcg == root)
6966                 return;
6967
6968         usage = page_counter_read(&memcg->memory);
6969         if (!usage)
6970                 return;
6971
6972         parent = parent_mem_cgroup(memcg);
6973
6974         if (parent == root) {
6975                 memcg->memory.emin = READ_ONCE(memcg->memory.min);
6976                 memcg->memory.elow = READ_ONCE(memcg->memory.low);
6977                 return;
6978         }
6979
6980         parent_usage = page_counter_read(&parent->memory);
6981
6982         WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6983                         READ_ONCE(memcg->memory.min),
6984                         READ_ONCE(parent->memory.emin),
6985                         atomic_long_read(&parent->memory.children_min_usage)));
6986
6987         WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6988                         READ_ONCE(memcg->memory.low),
6989                         READ_ONCE(parent->memory.elow),
6990                         atomic_long_read(&parent->memory.children_low_usage)));
6991 }
6992
6993 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
6994                         gfp_t gfp)
6995 {
6996         long nr_pages = folio_nr_pages(folio);
6997         int ret;
6998
6999         ret = try_charge(memcg, gfp, nr_pages);
7000         if (ret)
7001                 goto out;
7002
7003         css_get(&memcg->css);
7004         commit_charge(folio, memcg);
7005
7006         local_irq_disable();
7007         mem_cgroup_charge_statistics(memcg, nr_pages);
7008         memcg_check_events(memcg, folio_nid(folio));
7009         local_irq_enable();
7010 out:
7011         return ret;
7012 }
7013
7014 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
7015 {
7016         struct mem_cgroup *memcg;
7017         int ret;
7018
7019         memcg = get_mem_cgroup_from_mm(mm);
7020         ret = charge_memcg(folio, memcg, gfp);
7021         css_put(&memcg->css);
7022
7023         return ret;
7024 }
7025
7026 /**
7027  * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin.
7028  * @folio: folio to charge.
7029  * @mm: mm context of the victim
7030  * @gfp: reclaim mode
7031  * @entry: swap entry for which the folio is allocated
7032  *
7033  * This function charges a folio allocated for swapin. Please call this before
7034  * adding the folio to the swapcache.
7035  *
7036  * Returns 0 on success. Otherwise, an error code is returned.
7037  */
7038 int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
7039                                   gfp_t gfp, swp_entry_t entry)
7040 {
7041         struct mem_cgroup *memcg;
7042         unsigned short id;
7043         int ret;
7044
7045         if (mem_cgroup_disabled())
7046                 return 0;
7047
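        /*
         * Prefer the memcg recorded for the swap entry, i.e. the cgroup that
         * was charged when the page was swapped out, provided it is still
         * online; otherwise fall back to the faulting mm's memcg.
         */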
7048         id = lookup_swap_cgroup_id(entry);
7049         rcu_read_lock();
7050         memcg = mem_cgroup_from_id(id);
7051         if (!memcg || !css_tryget_online(&memcg->css))
7052                 memcg = get_mem_cgroup_from_mm(mm);
7053         rcu_read_unlock();
7054
7055         ret = charge_memcg(folio, memcg, gfp);
7056
7057         css_put(&memcg->css);
7058         return ret;
7059 }
7060
7061 /*
7062  * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
7063  * @entry: swap entry for which the page is charged
7064  *
7065  * Call this function after successfully adding the charged page to swapcache.
7066  *
7067  * Note: This function assumes the page for which the swap slot is being
7068  * uncharged is an order-0 page.
7069  */
7070 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
7071 {
7072         /*
7073          * Cgroup1's unified memory+swap counter has been charged with the
7074          * new swapcache page, finish the transfer by uncharging the swap
7075          * slot. The swap slot would also get uncharged when it dies, but
7076          * it can stick around indefinitely and we'd count the page twice
7077          * the entire time.
7078          *
7079          * Cgroup2 has separate resource counters for memory and swap,
7080          * so this is a non-issue here. Memory and swap charge lifetimes
7081          * correspond 1:1 to page and swap slot lifetimes: we charge the
7082          * page to memory here, and uncharge swap when the slot is freed.
7083          */
7084         if (!mem_cgroup_disabled() && do_memsw_account()) {
7085                 /*
7086                  * The swap entry might not get freed for a long time,
7087                  * let's not wait for it.  The page already received a
7088                  * memory+swap charge, drop the swap entry duplicate.
7089                  */
7090                 mem_cgroup_uncharge_swap(entry, 1);
7091         }
7092 }
7093
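/*
 * Uncharge batching state: consecutive folios belonging to the same memcg
 * are gathered here so that page counters and statistics can be updated
 * once per batch in uncharge_batch() rather than once per folio.
 */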
7094 struct uncharge_gather {
7095         struct mem_cgroup *memcg;
7096         unsigned long nr_memory;
7097         unsigned long pgpgout;
7098         unsigned long nr_kmem;
7099         int nid;
7100 };
7101
7102 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
7103 {
7104         memset(ug, 0, sizeof(*ug));
7105 }
7106
7107 static void uncharge_batch(const struct uncharge_gather *ug)
7108 {
7109         unsigned long flags;
7110
7111         if (ug->nr_memory) {
7112                 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
7113                 if (do_memsw_account())
7114                         page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
7115                 if (ug->nr_kmem)
7116                         memcg_account_kmem(ug->memcg, -ug->nr_kmem);
7117                 memcg_oom_recover(ug->memcg);
7118         }
7119
7120         local_irq_save(flags);
7121         __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
7122         __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
7123         memcg_check_events(ug->memcg, ug->nid);
7124         local_irq_restore(flags);
7125
7126         /* drop reference from uncharge_folio */
7127         css_put(&ug->memcg->css);
7128 }
7129
7130 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
7131 {
7132         long nr_pages;
7133         struct mem_cgroup *memcg;
7134         struct obj_cgroup *objcg;
7135
7136         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7137
7138         /*
7139          * Nobody should be changing or seriously looking at
7140          * folio memcg or objcg at this point; we have fully
7141          * exclusive access to the folio.
7142          */
7143         if (folio_memcg_kmem(folio)) {
7144                 objcg = __folio_objcg(folio);
7145                 /*
7146                  * This get matches the put at the end of the function and
7147                  * kmem pages do not hold memcg references anymore.
7148                  */
7149                 memcg = get_mem_cgroup_from_objcg(objcg);
7150         } else {
7151                 memcg = __folio_memcg(folio);
7152         }
7153
7154         if (!memcg)
7155                 return;
7156
7157         if (ug->memcg != memcg) {
7158                 if (ug->memcg) {
7159                         uncharge_batch(ug);
7160                         uncharge_gather_clear(ug);
7161                 }
7162                 ug->memcg = memcg;
7163                 ug->nid = folio_nid(folio);
7164
7165                 /* pairs with css_put in uncharge_batch */
7166                 css_get(&memcg->css);
7167         }
7168
7169         nr_pages = folio_nr_pages(folio);
7170
7171         if (folio_memcg_kmem(folio)) {
7172                 ug->nr_memory += nr_pages;
7173                 ug->nr_kmem += nr_pages;
7174
7175                 folio->memcg_data = 0;
7176                 obj_cgroup_put(objcg);
7177         } else {
7178                 /* LRU pages aren't accounted at the root level */
7179                 if (!mem_cgroup_is_root(memcg))
7180                         ug->nr_memory += nr_pages;
7181                 ug->pgpgout++;
7182
7183                 folio->memcg_data = 0;
7184         }
7185
7186         css_put(&memcg->css);
7187 }
7188
7189 void __mem_cgroup_uncharge(struct folio *folio)
7190 {
7191         struct uncharge_gather ug;
7192
7193         /* Don't touch folio->lru of any random page, pre-check: */
7194         if (!folio_memcg(folio))
7195                 return;
7196
7197         uncharge_gather_clear(&ug);
7198         uncharge_folio(folio, &ug);
7199         uncharge_batch(&ug);
7200 }
7201
7202 /**
7203  * __mem_cgroup_uncharge_list - uncharge a list of pages
7204  * @page_list: list of pages to uncharge
7205  *
7206  * Uncharge a list of pages previously charged with
7207  * __mem_cgroup_charge().
7208  */
7209 void __mem_cgroup_uncharge_list(struct list_head *page_list)
7210 {
7211         struct uncharge_gather ug;
7212         struct folio *folio;
7213
7214         uncharge_gather_clear(&ug);
7215         list_for_each_entry(folio, page_list, lru)
7216                 uncharge_folio(folio, &ug);
7217         if (ug.memcg)
7218                 uncharge_batch(&ug);
7219 }
7220
7221 /**
7222  * mem_cgroup_migrate - Charge a folio's replacement.
7223  * @old: Currently circulating folio.
7224  * @new: Replacement folio.
7225  *
7226  * Charge @new as a replacement folio for @old. @old will
7227  * be uncharged upon free.
7228  *
7229  * Both folios must be locked, @new->mapping must be set up.
7230  */
7231 void mem_cgroup_migrate(struct folio *old, struct folio *new)
7232 {
7233         struct mem_cgroup *memcg;
7234         long nr_pages = folio_nr_pages(new);
7235         unsigned long flags;
7236
7237         VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
7238         VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
7239         VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
7240         VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
7241
7242         if (mem_cgroup_disabled())
7243                 return;
7244
7245         /* Page cache replacement: new folio already charged? */
7246         if (folio_memcg(new))
7247                 return;
7248
7249         memcg = folio_memcg(old);
7250         VM_WARN_ON_ONCE_FOLIO(!memcg, old);
7251         if (!memcg)
7252                 return;
7253
7254         /* Force-charge the new page. The old one will be freed soon */
7255         if (!mem_cgroup_is_root(memcg)) {
7256                 page_counter_charge(&memcg->memory, nr_pages);
7257                 if (do_memsw_account())
7258                         page_counter_charge(&memcg->memsw, nr_pages);
7259         }
7260
7261         css_get(&memcg->css);
7262         commit_charge(new, memcg);
7263
7264         local_irq_save(flags);
7265         mem_cgroup_charge_statistics(memcg, nr_pages);
7266         memcg_check_events(memcg, folio_nid(new));
7267         local_irq_restore(flags);
7268 }
7269
7270 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
7271 EXPORT_SYMBOL(memcg_sockets_enabled_key);
7272
7273 void mem_cgroup_sk_alloc(struct sock *sk)
7274 {
7275         struct mem_cgroup *memcg;
7276
7277         if (!mem_cgroup_sockets_enabled)
7278                 return;
7279
7280         /* Do not associate the sock with an unrelated interrupted task's memcg. */
7281         if (!in_task())
7282                 return;
7283
7284         rcu_read_lock();
7285         memcg = mem_cgroup_from_task(current);
7286         if (mem_cgroup_is_root(memcg))
7287                 goto out;
7288         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
7289                 goto out;
7290         if (css_tryget(&memcg->css))
7291                 sk->sk_memcg = memcg;
7292 out:
7293         rcu_read_unlock();
7294 }
7295
7296 void mem_cgroup_sk_free(struct sock *sk)
7297 {
7298         if (sk->sk_memcg)
7299                 css_put(&sk->sk_memcg->css);
7300 }
7301
7302 /**
7303  * mem_cgroup_charge_skmem - charge socket memory
7304  * @memcg: memcg to charge
7305  * @nr_pages: number of pages to charge
7306  * @gfp_mask: reclaim mode
7307  *
7308  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
7309  * @memcg's configured limit, %false if it doesn't.
7310  */
7311 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
7312                              gfp_t gfp_mask)
7313 {
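        /*
         * On cgroup1, socket memory is tracked in the separate tcpmem
         * counter; on the default hierarchy it is charged to the unified
         * memory counter and accounted under MEMCG_SOCK.
         */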
7314         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7315                 struct page_counter *fail;
7316
7317                 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
7318                         memcg->tcpmem_pressure = 0;
7319                         return true;
7320                 }
7321                 memcg->tcpmem_pressure = 1;
7322                 if (gfp_mask & __GFP_NOFAIL) {
7323                         page_counter_charge(&memcg->tcpmem, nr_pages);
7324                         return true;
7325                 }
7326                 return false;
7327         }
7328
7329         if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
7330                 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
7331                 return true;
7332         }
7333
7334         return false;
7335 }
7336
7337 /**
7338  * mem_cgroup_uncharge_skmem - uncharge socket memory
7339  * @memcg: memcg to uncharge
7340  * @nr_pages: number of pages to uncharge
7341  */
7342 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
7343 {
7344         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
7345                 page_counter_uncharge(&memcg->tcpmem, nr_pages);
7346                 return;
7347         }
7348
7349         mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
7350
7351         refill_stock(memcg, nr_pages);
7352 }
7353
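/*
 * Parse the "cgroup.memory=" boot option: the comma-separated tokens
 * "nosocket", "nokmem" and "nobpf" disable socket, kernel and BPF memory
 * accounting respectively.
 */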
7354 static int __init cgroup_memory(char *s)
7355 {
7356         char *token;
7357
7358         while ((token = strsep(&s, ",")) != NULL) {
7359                 if (!*token)
7360                         continue;
7361                 if (!strcmp(token, "nosocket"))
7362                         cgroup_memory_nosocket = true;
7363                 if (!strcmp(token, "nokmem"))
7364                         cgroup_memory_nokmem = true;
7365                 if (!strcmp(token, "nobpf"))
7366                         cgroup_memory_nobpf = true;
7367         }
7368         return 1;
7369 }
7370 __setup("cgroup.memory=", cgroup_memory);
7371
7372 /*
7373  * subsys_initcall() for memory controller.
7374  *
7375  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7376  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7377  * basically everything that doesn't depend on a specific mem_cgroup structure
7378  * should be initialized from here.
7379  */
7380 static int __init mem_cgroup_init(void)
7381 {
7382         int cpu, node;
7383
7384         /*
7385          * Currently an s32 type (see struct batched_lruvec_stat) is used for
7386          * per-memcg-per-cpu caching of per-node statistics. For this to work
7387          * correctly, the overfill threshold must not exceed S32_MAX / PAGE_SIZE.
7389          */
7390         BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7391
7392         cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7393                                   memcg_hotplug_cpu_dead);
7394
7395         for_each_possible_cpu(cpu)
7396                 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7397                           drain_local_stock);
7398
7399         for_each_node(node) {
7400                 struct mem_cgroup_tree_per_node *rtpn;
7401
7402                 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node);
7403
7404                 rtpn->rb_root = RB_ROOT;
7405                 rtpn->rb_rightmost = NULL;
7406                 spin_lock_init(&rtpn->lock);
7407                 soft_limit_tree.rb_tree_per_node[node] = rtpn;
7408         }
7409
7410         return 0;
7411 }
7412 subsys_initcall(mem_cgroup_init);
7413
7414 #ifdef CONFIG_SWAP
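/*
 * Pin the ID of @memcg or, if its ID reference has already been released,
 * of the closest ancestor whose ID can still be pinned (ultimately the
 * root cgroup, which can never be destroyed).
 */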
7415 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7416 {
7417         while (!refcount_inc_not_zero(&memcg->id.ref)) {
7418                 /*
7419                  * The root cgroup cannot be destroyed, so its refcount must
7420                  * always be >= 1.
7421                  */
7422                 if (WARN_ON_ONCE(mem_cgroup_is_root(memcg))) {
7423                         VM_BUG_ON(1);
7424                         break;
7425                 }
7426                 memcg = parent_mem_cgroup(memcg);
7427                 if (!memcg)
7428                         memcg = root_mem_cgroup;
7429         }
7430         return memcg;
7431 }
7432
7433 /**
7434  * mem_cgroup_swapout - transfer a memsw charge to swap
7435  * @folio: folio whose memsw charge to transfer
7436  * @entry: swap entry to move the charge to
7437  *
7438  * Transfer the memsw charge of @folio to @entry.
7439  */
7440 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7441 {
7442         struct mem_cgroup *memcg, *swap_memcg;
7443         unsigned int nr_entries;
7444         unsigned short oldid;
7445
7446         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7447         VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7448
7449         if (mem_cgroup_disabled())
7450                 return;
7451
7452         if (!do_memsw_account())
7453                 return;
7454
7455         memcg = folio_memcg(folio);
7456
7457         VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7458         if (!memcg)
7459                 return;
7460
7461         /*
7462          * In case the memcg owning these pages has been offlined and doesn't
7463          * have an ID allocated to it anymore, charge the closest online
7464          * ancestor for the swap instead and transfer the memory+swap charge.
7465          */
7466         swap_memcg = mem_cgroup_id_get_online(memcg);
7467         nr_entries = folio_nr_pages(folio);
7468         /* Get references for the tail pages, too */
7469         if (nr_entries > 1)
7470                 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7471         oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7472                                    nr_entries);
7473         VM_BUG_ON_FOLIO(oldid, folio);
7474         mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7475
7476         folio->memcg_data = 0;
7477
7478         if (!mem_cgroup_is_root(memcg))
7479                 page_counter_uncharge(&memcg->memory, nr_entries);
7480
7481         if (memcg != swap_memcg) {
7482                 if (!mem_cgroup_is_root(swap_memcg))
7483                         page_counter_charge(&swap_memcg->memsw, nr_entries);
7484                 page_counter_uncharge(&memcg->memsw, nr_entries);
7485         }
7486
7487         /*
7488          * Interrupts must be disabled here: the caller holds the i_pages
7489          * lock, which is taken with interrupts off, and disabled
7490          * interrupts are the only synchronisation we have for updating
7491          * the per-CPU variables.
7492          */
7493         memcg_stats_lock();
7494         mem_cgroup_charge_statistics(memcg, -nr_entries);
7495         memcg_stats_unlock();
7496         memcg_check_events(memcg, folio_nid(folio));
7497
7498         css_put(&memcg->css);
7499 }
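/*
 * Caller-side sketch (simplified; see __remove_mapping() in mm/vmscan.c): the
 * memsw transfer happens while the swap cache's i_pages lock is held with
 * interrupts disabled, for example:
 *
 *	xa_lock_irq(&mapping->i_pages);
 *	...
 *	mem_cgroup_swapout(folio, swap);
 *	__delete_from_swap_cache(folio, swap, shadow);
 *	xa_unlock_irq(&mapping->i_pages);
 *
 * which is why the statistics update above may rely on interrupts being off.
 */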
7500
7501 /**
7502  * __mem_cgroup_try_charge_swap - try charging swap space for a folio
7503  * @folio: folio being added to swap
7504  * @entry: swap entry to charge
7505  *
7506  * Try to charge @folio's memcg for the swap space at @entry.
7507  *
7508  * Returns 0 on success, -ENOMEM on failure.
7509  */
7510 int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry)
7511 {
7512         unsigned int nr_pages = folio_nr_pages(folio);
7513         struct page_counter *counter;
7514         struct mem_cgroup *memcg;
7515         unsigned short oldid;
7516
7517         if (do_memsw_account())
7518                 return 0;
7519
7520         memcg = folio_memcg(folio);
7521
7522         VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7523         if (!memcg)
7524                 return 0;
7525
7526         if (!entry.val) {
7527                 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7528                 return 0;
7529         }
7530
7531         memcg = mem_cgroup_id_get_online(memcg);
7532
7533         if (!mem_cgroup_is_root(memcg) &&
7534             !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7535                 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7536                 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7537                 mem_cgroup_id_put(memcg);
7538                 return -ENOMEM;
7539         }
7540
7541         /* Get references for the tail pages, too */
7542         if (nr_pages > 1)
7543                 mem_cgroup_id_get_many(memcg, nr_pages - 1);
7544         oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7545         VM_BUG_ON_FOLIO(oldid, folio);
7546         mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7547
7548         return 0;
7549 }
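/*
 * Note: callers normally go through the mem_cgroup_try_charge_swap() wrapper
 * (in include/linux/swap.h as of this tree), which roughly does:
 *
 *	static inline int mem_cgroup_try_charge_swap(struct folio *folio,
 *						     swp_entry_t entry)
 *	{
 *		if (mem_cgroup_disabled())
 *			return 0;
 *		return __mem_cgroup_try_charge_swap(folio, entry);
 *	}
 *
 * so the mem_cgroup_disabled() check is intentionally absent here.
 */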
7550
7551 /**
7552  * __mem_cgroup_uncharge_swap - uncharge swap space
7553  * @entry: swap entry to uncharge
7554  * @nr_pages: the amount of swap space to uncharge
7555  */
7556 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7557 {
7558         struct mem_cgroup *memcg;
7559         unsigned short id;
7560
7561         id = swap_cgroup_record(entry, 0, nr_pages);
7562         rcu_read_lock();
7563         memcg = mem_cgroup_from_id(id);
7564         if (memcg) {
7565                 if (!mem_cgroup_is_root(memcg)) {
7566                         if (do_memsw_account())
7567                                 page_counter_uncharge(&memcg->memsw, nr_pages);
7568                         else
7569                                 page_counter_uncharge(&memcg->swap, nr_pages);
7570                 }
7571                 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7572                 mem_cgroup_id_put_many(memcg, nr_pages);
7573         }
7574         rcu_read_unlock();
7575 }
7576
7577 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7578 {
7579         long nr_swap_pages = get_nr_swap_pages();
7580
7581         if (mem_cgroup_disabled() || do_memsw_account())
7582                 return nr_swap_pages;
7583         for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg))
7584                 nr_swap_pages = min_t(long, nr_swap_pages,
7585                                       READ_ONCE(memcg->swap.max) -
7586                                       page_counter_read(&memcg->swap));
7587         return nr_swap_pages;
7588 }
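/*
 * Worked example (hypothetical numbers): with 1000 pages of free swap
 * globally, a child memcg with swap.max == "max", and a parent with
 * swap.max == 400 and a swap usage of 100 pages, the walk above yields
 *
 *	min(1000, <huge child headroom>, 400 - 100) = 300
 *
 * i.e. the child may use at most 300 more pages of swap before an
 * ancestor's limit is hit.
 */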
7589
7590 bool mem_cgroup_swap_full(struct folio *folio)
7591 {
7592         struct mem_cgroup *memcg;
7593
7594         VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
7595
7596         if (vm_swap_full())
7597                 return true;
7598         if (do_memsw_account())
7599                 return false;
7600
7601         memcg = folio_memcg(folio);
7602         if (!memcg)
7603                 return false;
7604
7605         for (; !mem_cgroup_is_root(memcg); memcg = parent_mem_cgroup(memcg)) {
7606                 unsigned long usage = page_counter_read(&memcg->swap);
7607
7608                 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7609                     usage * 2 >= READ_ONCE(memcg->swap.max))
7610                         return true;
7611         }
7612
7613         return false;
7614 }
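/*
 * Worked example (hypothetical numbers): with swap.max == 200 pages and a
 * swap usage of 100 pages somewhere in the hierarchy, usage * 2 >= max holds
 * (200 >= 200), so the folio's hierarchy is treated as "swap full" once half
 * of the allowed swap is in use and callers start freeing swap entries more
 * aggressively.
 */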
7615
7616 static int __init setup_swap_account(char *s)
7617 {
7618         pr_warn_once("The swapaccount= command line option is deprecated. "
7619                      "Please report your use case to linux-mm@kvack.org if you "
7620                      "depend on this functionality.\n");
7621         return 1;
7622 }
7623 __setup("swapaccount=", setup_swap_account);
7624
7625 static u64 swap_current_read(struct cgroup_subsys_state *css,
7626                              struct cftype *cft)
7627 {
7628         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7629
7630         return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7631 }
7632
7633 static u64 swap_peak_read(struct cgroup_subsys_state *css,
7634                           struct cftype *cft)
7635 {
7636         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7637
7638         return (u64)memcg->swap.watermark * PAGE_SIZE;
7639 }
7640
7641 static int swap_high_show(struct seq_file *m, void *v)
7642 {
7643         return seq_puts_memcg_tunable(m,
7644                 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7645 }
7646
7647 static ssize_t swap_high_write(struct kernfs_open_file *of,
7648                                char *buf, size_t nbytes, loff_t off)
7649 {
7650         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7651         unsigned long high;
7652         int err;
7653
7654         buf = strstrip(buf);
7655         err = page_counter_memparse(buf, "max", &high);
7656         if (err)
7657                 return err;
7658
7659         page_counter_set_high(&memcg->swap, high);
7660
7661         return nbytes;
7662 }
7663
7664 static int swap_max_show(struct seq_file *m, void *v)
7665 {
7666         return seq_puts_memcg_tunable(m,
7667                 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7668 }
7669
7670 static ssize_t swap_max_write(struct kernfs_open_file *of,
7671                               char *buf, size_t nbytes, loff_t off)
7672 {
7673         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7674         unsigned long max;
7675         int err;
7676
7677         buf = strstrip(buf);
7678         err = page_counter_memparse(buf, "max", &max);
7679         if (err)
7680                 return err;
7681
7682         xchg(&memcg->swap.max, max);
7683
7684         return nbytes;
7685 }
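/*
 * Usage example (cgroup2 interface, illustrative): the limit accepts either a
 * byte value or the literal "max":
 *
 *	echo 512M > /sys/fs/cgroup/<group>/memory.swap.max
 *	echo max  > /sys/fs/cgroup/<group>/memory.swap.max
 *
 * page_counter_memparse() converts the byte value to a page count (rounding
 * down) before it is stored via the xchg() above.
 */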
7686
7687 static int swap_events_show(struct seq_file *m, void *v)
7688 {
7689         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7690
7691         seq_printf(m, "high %lu\n",
7692                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7693         seq_printf(m, "max %lu\n",
7694                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7695         seq_printf(m, "fail %lu\n",
7696                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7697
7698         return 0;
7699 }
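/*
 * Example memory.swap.events output (illustrative values):
 *
 *	high 0
 *	max 3
 *	fail 3
 *
 * Roughly: "high" counts times swap usage exceeded swap.high, "max" counts
 * charges denied by swap.max, and "fail" counts swap charge failures overall
 * (including running out of swap space system-wide).
 */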
7700
7701 static struct cftype swap_files[] = {
7702         {
7703                 .name = "swap.current",
7704                 .flags = CFTYPE_NOT_ON_ROOT,
7705                 .read_u64 = swap_current_read,
7706         },
7707         {
7708                 .name = "swap.high",
7709                 .flags = CFTYPE_NOT_ON_ROOT,
7710                 .seq_show = swap_high_show,
7711                 .write = swap_high_write,
7712         },
7713         {
7714                 .name = "swap.max",
7715                 .flags = CFTYPE_NOT_ON_ROOT,
7716                 .seq_show = swap_max_show,
7717                 .write = swap_max_write,
7718         },
7719         {
7720                 .name = "swap.peak",
7721                 .flags = CFTYPE_NOT_ON_ROOT,
7722                 .read_u64 = swap_peak_read,
7723         },
7724         {
7725                 .name = "swap.events",
7726                 .flags = CFTYPE_NOT_ON_ROOT,
7727                 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
7728                 .seq_show = swap_events_show,
7729         },
7730         { }     /* terminate */
7731 };
7732
7733 static struct cftype memsw_files[] = {
7734         {
7735                 .name = "memsw.usage_in_bytes",
7736                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7737                 .read_u64 = mem_cgroup_read_u64,
7738         },
7739         {
7740                 .name = "memsw.max_usage_in_bytes",
7741                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7742                 .write = mem_cgroup_reset,
7743                 .read_u64 = mem_cgroup_read_u64,
7744         },
7745         {
7746                 .name = "memsw.limit_in_bytes",
7747                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7748                 .write = mem_cgroup_write,
7749                 .read_u64 = mem_cgroup_read_u64,
7750         },
7751         {
7752                 .name = "memsw.failcnt",
7753                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7754                 .write = mem_cgroup_reset,
7755                 .read_u64 = mem_cgroup_read_u64,
7756         },
7757         { },    /* terminate */
7758 };
7759
7760 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
7761 /**
7762  * obj_cgroup_may_zswap - check if this cgroup can zswap
7763  * @objcg: the object cgroup
7764  *
7765  * Check if the hierarchical zswap limit has been reached.
7766  *
7767  * This doesn't check for specific headroom, and it is not atomic
7768  * either. But with zswap, the size of the allocation is only known
7769  * once compression has occurred, and this optimistic pre-check avoids
7770  * spending cycles on compression when there is already no room left
7771  * or zswap is disabled altogether somewhere in the hierarchy.
7772  */
7773 bool obj_cgroup_may_zswap(struct obj_cgroup *objcg)
7774 {
7775         struct mem_cgroup *memcg, *original_memcg;
7776         bool ret = true;
7777
7778         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7779                 return true;
7780
7781         original_memcg = get_mem_cgroup_from_objcg(objcg);
7782         for (memcg = original_memcg; !mem_cgroup_is_root(memcg);
7783              memcg = parent_mem_cgroup(memcg)) {
7784                 unsigned long max = READ_ONCE(memcg->zswap_max);
7785                 unsigned long pages;
7786
7787                 if (max == PAGE_COUNTER_MAX)
7788                         continue;
7789                 if (max == 0) {
7790                         ret = false;
7791                         break;
7792                 }
7793
7794                 cgroup_rstat_flush(memcg->css.cgroup);
7795                 pages = memcg_page_state(memcg, MEMCG_ZSWAP_B) / PAGE_SIZE;
7796                 if (pages < max)
7797                         continue;
7798                 ret = false;
7799                 break;
7800         }
7801         mem_cgroup_put(original_memcg);
7802         return ret;
7803 }
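/*
 * Caller-side sketch (simplified from the zswap store path in mm/zswap.c;
 * helper names are approximate): the check is made up front, before any CPU
 * time is spent on compression:
 *
 *	objcg = get_obj_cgroup_from_page(page);
 *	if (objcg && !obj_cgroup_may_zswap(objcg))
 *		goto reject;
 *	... compress the page ...
 *	obj_cgroup_charge_zswap(objcg, compressed_len);
 *
 * The matching obj_cgroup_uncharge_zswap() on the free/load path undoes the
 * charge with the same compressed length.
 */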
7804
7805 /**
7806  * obj_cgroup_charge_zswap - charge compression backend memory
7807  * @objcg: the object cgroup
7808  * @size: size of compressed object
7809  *
7810  * This forces the charge after obj_cgroup_may_zswap() allowed
7811  * compression and storage in zswap for this cgroup to go ahead.
7812  */
7813 void obj_cgroup_charge_zswap(struct obj_cgroup *objcg, size_t size)
7814 {
7815         struct mem_cgroup *memcg;
7816
7817         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7818                 return;
7819
7820         VM_WARN_ON_ONCE(!(current->flags & PF_MEMALLOC));
7821
7822         /* PF_MEMALLOC context, charging must succeed */
7823         if (obj_cgroup_charge(objcg, GFP_KERNEL, size))
7824                 VM_WARN_ON_ONCE(1);
7825
7826         rcu_read_lock();
7827         memcg = obj_cgroup_memcg(objcg);
7828         mod_memcg_state(memcg, MEMCG_ZSWAP_B, size);
7829         mod_memcg_state(memcg, MEMCG_ZSWAPPED, 1);
7830         rcu_read_unlock();
7831 }
7832
7833 /**
7834  * obj_cgroup_uncharge_zswap - uncharge compression backend memory
7835  * @objcg: the object cgroup
7836  * @size: size of compressed object
7837  *
7838  * Uncharges zswap memory once the compressed object is freed, e.g. on page in.
7839  */
7840 void obj_cgroup_uncharge_zswap(struct obj_cgroup *objcg, size_t size)
7841 {
7842         struct mem_cgroup *memcg;
7843
7844         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7845                 return;
7846
7847         obj_cgroup_uncharge(objcg, size);
7848
7849         rcu_read_lock();
7850         memcg = obj_cgroup_memcg(objcg);
7851         mod_memcg_state(memcg, MEMCG_ZSWAP_B, -size);
7852         mod_memcg_state(memcg, MEMCG_ZSWAPPED, -1);
7853         rcu_read_unlock();
7854 }
7855
7856 static u64 zswap_current_read(struct cgroup_subsys_state *css,
7857                               struct cftype *cft)
7858 {
7859         cgroup_rstat_flush(css->cgroup);
7860         return memcg_page_state(mem_cgroup_from_css(css), MEMCG_ZSWAP_B);
7861 }
7862
7863 static int zswap_max_show(struct seq_file *m, void *v)
7864 {
7865         return seq_puts_memcg_tunable(m,
7866                 READ_ONCE(mem_cgroup_from_seq(m)->zswap_max));
7867 }
7868
7869 static ssize_t zswap_max_write(struct kernfs_open_file *of,
7870                                char *buf, size_t nbytes, loff_t off)
7871 {
7872         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7873         unsigned long max;
7874         int err;
7875
7876         buf = strstrip(buf);
7877         err = page_counter_memparse(buf, "max", &max);
7878         if (err)
7879                 return err;
7880
7881         xchg(&memcg->zswap_max, max);
7882
7883         return nbytes;
7884 }
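/*
 * Usage example (cgroup2 interface, illustrative): like memory.swap.max, the
 * zswap limit accepts a byte value or "max":
 *
 *	echo 64M > /sys/fs/cgroup/<group>/memory.zswap.max
 *	echo 0   > /sys/fs/cgroup/<group>/memory.zswap.max
 *
 * where 0 disables zswap for the whole subtree (see the max == 0 case in
 * obj_cgroup_may_zswap() above).
 */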
7885
7886 static struct cftype zswap_files[] = {
7887         {
7888                 .name = "zswap.current",
7889                 .flags = CFTYPE_NOT_ON_ROOT,
7890                 .read_u64 = zswap_current_read,
7891         },
7892         {
7893                 .name = "zswap.max",
7894                 .flags = CFTYPE_NOT_ON_ROOT,
7895                 .seq_show = zswap_max_show,
7896                 .write = zswap_max_write,
7897         },
7898         { }     /* terminate */
7899 };
7900 #endif /* CONFIG_MEMCG_KMEM && CONFIG_ZSWAP */
7901
7902 static int __init mem_cgroup_swap_init(void)
7903 {
7904         if (mem_cgroup_disabled())
7905                 return 0;
7906
7907         WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7908         WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7909 #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_ZSWAP)
7910         WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, zswap_files));
7911 #endif
7912         return 0;
7913 }
7914 subsys_initcall(mem_cgroup_swap_init);
7915
7916 #endif /* CONFIG_SWAP */