memcg: sync flush only if periodic flush is delayed
mm/memcontrol.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  *
24  * Per memcg lru locking
25  * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26  */
27
28 #include <linux/page_counter.h>
29 #include <linux/memcontrol.h>
30 #include <linux/cgroup.h>
31 #include <linux/pagewalk.h>
32 #include <linux/sched/mm.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/hugetlb.h>
35 #include <linux/pagemap.h>
36 #include <linux/vm_event_item.h>
37 #include <linux/smp.h>
38 #include <linux/page-flags.h>
39 #include <linux/backing-dev.h>
40 #include <linux/bit_spinlock.h>
41 #include <linux/rcupdate.h>
42 #include <linux/limits.h>
43 #include <linux/export.h>
44 #include <linux/mutex.h>
45 #include <linux/rbtree.h>
46 #include <linux/slab.h>
47 #include <linux/swap.h>
48 #include <linux/swapops.h>
49 #include <linux/spinlock.h>
50 #include <linux/eventfd.h>
51 #include <linux/poll.h>
52 #include <linux/sort.h>
53 #include <linux/fs.h>
54 #include <linux/seq_file.h>
55 #include <linux/vmpressure.h>
56 #include <linux/memremap.h>
57 #include <linux/mm_inline.h>
58 #include <linux/swap_cgroup.h>
59 #include <linux/cpu.h>
60 #include <linux/oom.h>
61 #include <linux/lockdep.h>
62 #include <linux/file.h>
63 #include <linux/resume_user_mode.h>
64 #include <linux/psi.h>
65 #include <linux/seq_buf.h>
66 #include "internal.h"
67 #include <net/sock.h>
68 #include <net/ip.h>
69 #include "slab.h"
70
71 #include <linux/uaccess.h>
72
73 #include <trace/events/vmscan.h>
74
75 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
76 EXPORT_SYMBOL(memory_cgrp_subsys);
77
78 struct mem_cgroup *root_mem_cgroup __read_mostly;
79
80 /* Active memory cgroup to use from an interrupt context */
81 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
82 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
83
84 /* Socket memory accounting disabled? */
85 static bool cgroup_memory_nosocket __ro_after_init;
86
87 /* Kernel memory accounting disabled? */
88 static bool cgroup_memory_nokmem __ro_after_init;
89
90 /* Whether the swap controller is active */
91 #ifdef CONFIG_MEMCG_SWAP
92 bool cgroup_memory_noswap __ro_after_init;
93 #else
94 #define cgroup_memory_noswap            1
95 #endif
96
97 #ifdef CONFIG_CGROUP_WRITEBACK
98 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
99 #endif
100
101 /* Whether legacy memory+swap accounting is active */
102 static bool do_memsw_account(void)
103 {
104         return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
105 }
106
107 #define THRESHOLDS_EVENTS_TARGET 128
108 #define SOFTLIMIT_EVENTS_TARGET 1024
109
110 /*
111  * Cgroups above their limits are maintained in an RB-tree, independent of
112  * their hierarchy representation.
113  */
114
115 struct mem_cgroup_tree_per_node {
116         struct rb_root rb_root;
117         struct rb_node *rb_rightmost;
118         spinlock_t lock;
119 };
120
121 struct mem_cgroup_tree {
122         struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
123 };
124
125 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
126
127 /* for OOM */
128 struct mem_cgroup_eventfd_list {
129         struct list_head list;
130         struct eventfd_ctx *eventfd;
131 };
132
133 /*
134  * cgroup_event represents events which userspace wants to receive.
135  */
136 struct mem_cgroup_event {
137         /*
138          * memcg which the event belongs to.
139          */
140         struct mem_cgroup *memcg;
141         /*
142          * eventfd to signal userspace about the event.
143          */
144         struct eventfd_ctx *eventfd;
145         /*
146          * Each of these is stored in a list by the cgroup.
147          */
148         struct list_head list;
149         /*
150          * register_event() callback will be used to add a new userspace
151          * waiter for changes related to this event.  Use eventfd_signal()
152          * on eventfd to send notification to userspace.
153          */
154         int (*register_event)(struct mem_cgroup *memcg,
155                               struct eventfd_ctx *eventfd, const char *args);
156         /*
157          * unregister_event() callback will be called when userspace closes
158          * the eventfd or when the cgroup is removed.  This callback must be
159          * set if you want to provide notification functionality.
160          */
161         void (*unregister_event)(struct mem_cgroup *memcg,
162                                  struct eventfd_ctx *eventfd);
163         /*
164          * All fields below needed to unregister event when
165          * userspace closes eventfd.
166          */
167         poll_table pt;
168         wait_queue_head_t *wqh;
169         wait_queue_entry_t wait;
170         struct work_struct remove;
171 };
172
173 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
174 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
175
176 /* Stuff for moving charges at task migration. */
177 /*
178  * Types of charges to be moved.
179  */
180 #define MOVE_ANON       0x1U
181 #define MOVE_FILE       0x2U
182 #define MOVE_MASK       (MOVE_ANON | MOVE_FILE)
183
184 /* "mc" and its members are protected by cgroup_mutex */
185 static struct move_charge_struct {
186         spinlock_t        lock; /* for from, to */
187         struct mm_struct  *mm;
188         struct mem_cgroup *from;
189         struct mem_cgroup *to;
190         unsigned long flags;
191         unsigned long precharge;
192         unsigned long moved_charge;
193         unsigned long moved_swap;
194         struct task_struct *moving_task;        /* a task moving charges */
195         wait_queue_head_t waitq;                /* a waitq for other context */
196 } mc = {
197         .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
198         .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
199 };
200
201 /*
202  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
203  * limit reclaim to prevent infinite loops, if they ever occur.
204  */
205 #define MEM_CGROUP_MAX_RECLAIM_LOOPS            100
206 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
207
208 /* for encoding cft->private value on file */
209 enum res_type {
210         _MEM,
211         _MEMSWAP,
212         _OOM_TYPE,
213         _KMEM,
214         _TCP,
215 };
216
217 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
218 #define MEMFILE_TYPE(val)       ((val) >> 16 & 0xffff)
219 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
220 /* Used for OOM notifier */
221 #define OOM_CONTROL             (0)
222
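/*
 * Illustrative sketch (not compiled): how a cftype's ->private value is
 * packed and unpacked with the macros above. RES_LIMIT stands in for any
 * 16-bit attribute value.
 */
#if 0
	/* pack: the res_type goes in the high 16 bits, the attribute in the low 16 */
	unsigned long priv = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);

	/* unpack */
	enum res_type type = MEMFILE_TYPE(priv);	/* == _MEMSWAP */
	int attr = MEMFILE_ATTR(priv);			/* == RES_LIMIT */
#endif
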
223 /*
224  * Iteration constructs for visiting all cgroups (under a tree).  If
225  * loops are exited prematurely (break), mem_cgroup_iter_break() must
226  * be used for reference counting.
227  */
228 #define for_each_mem_cgroup_tree(iter, root)            \
229         for (iter = mem_cgroup_iter(root, NULL, NULL);  \
230              iter != NULL;                              \
231              iter = mem_cgroup_iter(root, iter, NULL))
232
233 #define for_each_mem_cgroup(iter)                       \
234         for (iter = mem_cgroup_iter(NULL, NULL, NULL);  \
235              iter != NULL;                              \
236              iter = mem_cgroup_iter(NULL, iter, NULL))
237
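/*
 * Illustrative sketch (not compiled): leaving the loop below early requires
 * mem_cgroup_iter_break() to drop the reference taken on "iter", as the
 * comment above requires. "root" and should_stop() are placeholders.
 */
#if 0
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, root) {
		if (should_stop(iter)) {
			mem_cgroup_iter_break(root, iter);
			break;
		}
	}
#endif
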
238 static inline bool task_is_dying(void)
239 {
240         return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
241                 (current->flags & PF_EXITING);
242 }
243
244 /* Some nice accessors for the vmpressure. */
245 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
246 {
247         if (!memcg)
248                 memcg = root_mem_cgroup;
249         return &memcg->vmpressure;
250 }
251
252 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
253 {
254         return container_of(vmpr, struct mem_cgroup, vmpressure);
255 }
256
257 #ifdef CONFIG_MEMCG_KMEM
258 static DEFINE_SPINLOCK(objcg_lock);
259
260 bool mem_cgroup_kmem_disabled(void)
261 {
262         return cgroup_memory_nokmem;
263 }
264
265 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
266                                       unsigned int nr_pages);
267
268 static void obj_cgroup_release(struct percpu_ref *ref)
269 {
270         struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
271         unsigned int nr_bytes;
272         unsigned int nr_pages;
273         unsigned long flags;
274
275         /*
276          * At this point all allocated objects are freed, and
277          * objcg->nr_charged_bytes can't have an arbitrary byte value.
278          * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
279          *
280          * The following sequence can lead to it:
281          * 1) CPU0: objcg == stock->cached_objcg
282          * 2) CPU1: we do a small allocation (e.g. 92 bytes),
283          *          PAGE_SIZE bytes are charged
284          * 3) CPU1: a process from another memcg is allocating something,
285          *          the stock is flushed,
286          *          objcg->nr_charged_bytes = PAGE_SIZE - 92
287          * 4) CPU0: we release this object,
288          *          92 bytes are added to stock->nr_bytes
289          * 5) CPU0: stock is flushed,
290          *          92 bytes are added to objcg->nr_charged_bytes
291          *
292          * As a result, nr_charged_bytes == PAGE_SIZE.
293          * This page will be uncharged in obj_cgroup_release().
294          */
295         nr_bytes = atomic_read(&objcg->nr_charged_bytes);
296         WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
297         nr_pages = nr_bytes >> PAGE_SHIFT;
298
299         if (nr_pages)
300                 obj_cgroup_uncharge_pages(objcg, nr_pages);
301
302         spin_lock_irqsave(&objcg_lock, flags);
303         list_del(&objcg->list);
304         spin_unlock_irqrestore(&objcg_lock, flags);
305
306         percpu_ref_exit(ref);
307         kfree_rcu(objcg, rcu);
308 }
309
310 static struct obj_cgroup *obj_cgroup_alloc(void)
311 {
312         struct obj_cgroup *objcg;
313         int ret;
314
315         objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
316         if (!objcg)
317                 return NULL;
318
319         ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
320                               GFP_KERNEL);
321         if (ret) {
322                 kfree(objcg);
323                 return NULL;
324         }
325         INIT_LIST_HEAD(&objcg->list);
326         return objcg;
327 }
328
329 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
330                                   struct mem_cgroup *parent)
331 {
332         struct obj_cgroup *objcg, *iter;
333
334         objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
335
336         spin_lock_irq(&objcg_lock);
337
338         /* 1) Ready to reparent active objcg. */
339         list_add(&objcg->list, &memcg->objcg_list);
340         /* 2) Reparent active objcg and already reparented objcgs to parent. */
341         list_for_each_entry(iter, &memcg->objcg_list, list)
342                 WRITE_ONCE(iter->memcg, parent);
343         /* 3) Move already reparented objcgs to the parent's list */
344         list_splice(&memcg->objcg_list, &parent->objcg_list);
345
346         spin_unlock_irq(&objcg_lock);
347
348         percpu_ref_kill(&objcg->refcnt);
349 }
350
351 /*
352  * A lot of the calls to the cache allocation functions are expected to be
353  * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
354  * conditional on this static branch, we have to allow modules that do
355  * kmem_cache_alloc and the like to see this symbol as well.
356  */
357 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
358 EXPORT_SYMBOL(memcg_kmem_enabled_key);
359 #endif
360
361 /**
362  * mem_cgroup_css_from_page - css of the memcg associated with a page
363  * @page: page of interest
364  *
365  * If memcg is bound to the default hierarchy, css of the memcg associated
366  * with @page is returned.  The returned css remains associated with @page
367  * until it is released.
368  *
369  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
370  * is returned.
371  */
372 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
373 {
374         struct mem_cgroup *memcg;
375
376         memcg = page_memcg(page);
377
378         if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
379                 memcg = root_mem_cgroup;
380
381         return &memcg->css;
382 }
383
384 /**
385  * page_cgroup_ino - return inode number of the memcg a page is charged to
386  * @page: the page
387  *
388  * Look up the closest online ancestor of the memory cgroup @page is charged to
389  * and return its inode number or 0 if @page is not charged to any cgroup. It
390  * is safe to call this function without holding a reference to @page.
391  *
392  * Note, this function is inherently racy, because there is nothing to prevent
393  * the cgroup inode from getting torn down and potentially reallocated a moment
394  * after page_cgroup_ino() returns, so it should only be used by callers that
395  * do not care (such as procfs interfaces).
396  */
397 ino_t page_cgroup_ino(struct page *page)
398 {
399         struct mem_cgroup *memcg;
400         unsigned long ino = 0;
401
402         rcu_read_lock();
403         memcg = page_memcg_check(page);
404
405         while (memcg && !(memcg->css.flags & CSS_ONLINE))
406                 memcg = parent_mem_cgroup(memcg);
407         if (memcg)
408                 ino = cgroup_ino(memcg->css.cgroup);
409         rcu_read_unlock();
410         return ino;
411 }
412
413 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
414                                          struct mem_cgroup_tree_per_node *mctz,
415                                          unsigned long new_usage_in_excess)
416 {
417         struct rb_node **p = &mctz->rb_root.rb_node;
418         struct rb_node *parent = NULL;
419         struct mem_cgroup_per_node *mz_node;
420         bool rightmost = true;
421
422         if (mz->on_tree)
423                 return;
424
425         mz->usage_in_excess = new_usage_in_excess;
426         if (!mz->usage_in_excess)
427                 return;
428         while (*p) {
429                 parent = *p;
430                 mz_node = rb_entry(parent, struct mem_cgroup_per_node,
431                                         tree_node);
432                 if (mz->usage_in_excess < mz_node->usage_in_excess) {
433                         p = &(*p)->rb_left;
434                         rightmost = false;
435                 } else {
436                         p = &(*p)->rb_right;
437                 }
438         }
439
440         if (rightmost)
441                 mctz->rb_rightmost = &mz->tree_node;
442
443         rb_link_node(&mz->tree_node, parent, p);
444         rb_insert_color(&mz->tree_node, &mctz->rb_root);
445         mz->on_tree = true;
446 }
447
448 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
449                                          struct mem_cgroup_tree_per_node *mctz)
450 {
451         if (!mz->on_tree)
452                 return;
453
454         if (&mz->tree_node == mctz->rb_rightmost)
455                 mctz->rb_rightmost = rb_prev(&mz->tree_node);
456
457         rb_erase(&mz->tree_node, &mctz->rb_root);
458         mz->on_tree = false;
459 }
460
461 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
462                                        struct mem_cgroup_tree_per_node *mctz)
463 {
464         unsigned long flags;
465
466         spin_lock_irqsave(&mctz->lock, flags);
467         __mem_cgroup_remove_exceeded(mz, mctz);
468         spin_unlock_irqrestore(&mctz->lock, flags);
469 }
470
471 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
472 {
473         unsigned long nr_pages = page_counter_read(&memcg->memory);
474         unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
475         unsigned long excess = 0;
476
477         if (nr_pages > soft_limit)
478                 excess = nr_pages - soft_limit;
479
480         return excess;
481 }
482
483 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
484 {
485         unsigned long excess;
486         struct mem_cgroup_per_node *mz;
487         struct mem_cgroup_tree_per_node *mctz;
488
489         mctz = soft_limit_tree.rb_tree_per_node[nid];
490         if (!mctz)
491                 return;
492         /*
493          * Necessary to update all ancestors when hierarchy is used,
494          * because their event counters are not touched.
495          */
496         for (; memcg; memcg = parent_mem_cgroup(memcg)) {
497                 mz = memcg->nodeinfo[nid];
498                 excess = soft_limit_excess(memcg);
499                 /*
500                  * We have to update the tree if mz is on RB-tree or
501                  * mem is over its softlimit.
502                  */
503                 if (excess || mz->on_tree) {
504                         unsigned long flags;
505
506                         spin_lock_irqsave(&mctz->lock, flags);
507                         /* if on-tree, remove it */
508                         if (mz->on_tree)
509                                 __mem_cgroup_remove_exceeded(mz, mctz);
510                         /*
511                          * Insert again. mz->usage_in_excess will be updated.
512                          * If excess is 0, no tree ops.
513                          */
514                         __mem_cgroup_insert_exceeded(mz, mctz, excess);
515                         spin_unlock_irqrestore(&mctz->lock, flags);
516                 }
517         }
518 }
519
520 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
521 {
522         struct mem_cgroup_tree_per_node *mctz;
523         struct mem_cgroup_per_node *mz;
524         int nid;
525
526         for_each_node(nid) {
527                 mz = memcg->nodeinfo[nid];
528                 mctz = soft_limit_tree.rb_tree_per_node[nid];
529                 if (mctz)
530                         mem_cgroup_remove_exceeded(mz, mctz);
531         }
532 }
533
534 static struct mem_cgroup_per_node *
535 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
536 {
537         struct mem_cgroup_per_node *mz;
538
539 retry:
540         mz = NULL;
541         if (!mctz->rb_rightmost)
542                 goto done;              /* Nothing to reclaim from */
543
544         mz = rb_entry(mctz->rb_rightmost,
545                       struct mem_cgroup_per_node, tree_node);
546         /*
547          * Remove the node now but someone else can add it back,
548          * we will add it back at the end of reclaim to its correct
549          * position in the tree.
550          */
551         __mem_cgroup_remove_exceeded(mz, mctz);
552         if (!soft_limit_excess(mz->memcg) ||
553             !css_tryget(&mz->memcg->css))
554                 goto retry;
555 done:
556         return mz;
557 }
558
559 static struct mem_cgroup_per_node *
560 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
561 {
562         struct mem_cgroup_per_node *mz;
563
564         spin_lock_irq(&mctz->lock);
565         mz = __mem_cgroup_largest_soft_limit_node(mctz);
566         spin_unlock_irq(&mctz->lock);
567         return mz;
568 }
569
570 /*
571  * memcg and lruvec stats flushing
572  *
573  * Many codepaths leading to stats update or read are performance sensitive and
574  * adding stats flushing in such codepaths is not desirable. So, to optimize
575  * flushing, the kernel does the following:
576  *
577  * 1) Periodically and asynchronously flush the stats every 2 seconds so that
578  *    the rstat update tree does not grow unbounded.
579  *
580  * 2) Flush the stats synchronously on the reader side only when there are
581  *    more than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This lets the
582  *    stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus) events,
583  *    but only for 2 seconds, due to (1).
584  */
585 static void flush_memcg_stats_dwork(struct work_struct *w);
586 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
587 static DEFINE_SPINLOCK(stats_flush_lock);
588 static DEFINE_PER_CPU(unsigned int, stats_updates);
589 static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
590 static u64 flush_next_time;
591
592 #define FLUSH_TIME (2UL*HZ)
593
594 /*
595  * Accessors to ensure that preemption is disabled on PREEMPT_RT, because the
596  * stats code cannot rely on preemption being disabled as part of an acquired
597  * spinlock_t lock. These functions are never used in hardirq context on
598  * PREEMPT_RT and therefore disabling preemption is sufficient.
599  */
600 static void memcg_stats_lock(void)
601 {
602 #ifdef CONFIG_PREEMPT_RT
603       preempt_disable();
604 #else
605       VM_BUG_ON(!irqs_disabled());
606 #endif
607 }
608
609 static void __memcg_stats_lock(void)
610 {
611 #ifdef CONFIG_PREEMPT_RT
612       preempt_disable();
613 #endif
614 }
615
616 static void memcg_stats_unlock(void)
617 {
618 #ifdef CONFIG_PREEMPT_RT
619       preempt_enable();
620 #endif
621 }
622
623 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
624 {
625         unsigned int x;
626
627         cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
628
629         x = __this_cpu_add_return(stats_updates, abs(val));
630         if (x > MEMCG_CHARGE_BATCH) {
631                 atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
632                 __this_cpu_write(stats_updates, 0);
633         }
634 }
635
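/*
 * The arithmetic behind the thresholds above, spelled out: each CPU folds
 * its local stats_updates counter into stats_flush_threshold in units of
 * MEMCG_CHARGE_BATCH, and mem_cgroup_flush_stats() below only flushes once
 * stats_flush_threshold exceeds num_online_cpus(). With nr_cpus online CPUs,
 * a synchronous flush is therefore triggered only after roughly
 * MEMCG_CHARGE_BATCH * nr_cpus update events, which is the bound described
 * in the big comment above.
 */
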
636 static void __mem_cgroup_flush_stats(void)
637 {
638         unsigned long flag;
639
640         if (!spin_trylock_irqsave(&stats_flush_lock, flag))
641                 return;
642
643         flush_next_time = jiffies_64 + 2*FLUSH_TIME;
644         cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
645         atomic_set(&stats_flush_threshold, 0);
646         spin_unlock_irqrestore(&stats_flush_lock, flag);
647 }
648
649 void mem_cgroup_flush_stats(void)
650 {
651         if (atomic_read(&stats_flush_threshold) > num_online_cpus())
652                 __mem_cgroup_flush_stats();
653 }
654
655 void mem_cgroup_flush_stats_delayed(void)
656 {
657         if (time_after64(jiffies_64, flush_next_time))
658                 mem_cgroup_flush_stats();
659 }
660
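/*
 * Illustrative sketch (not compiled): how a stats reader might use the two
 * helpers above. mem_cgroup_flush_stats_delayed() forces a flush only when
 * the periodic worker is running late (jiffies_64 is past flush_next_time);
 * callers that always want the threshold check use mem_cgroup_flush_stats().
 * "memcg" and the chosen stat item are placeholders.
 */
#if 0
	/* tolerate ~2s of staleness unless the periodic flush is delayed */
	mem_cgroup_flush_stats_delayed();
	nr_file = memcg_page_state(memcg, NR_FILE_PAGES);
#endif
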
661 static void flush_memcg_stats_dwork(struct work_struct *w)
662 {
663         __mem_cgroup_flush_stats();
664         queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME);
665 }
666
667 /**
668  * __mod_memcg_state - update cgroup memory statistics
669  * @memcg: the memory cgroup
670  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
671  * @val: delta to add to the counter, can be negative
672  */
673 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
674 {
675         if (mem_cgroup_disabled())
676                 return;
677
678         __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
679         memcg_rstat_updated(memcg, val);
680 }
681
682 /* idx can be of type enum memcg_stat_item or node_stat_item. */
683 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
684 {
685         long x = 0;
686         int cpu;
687
688         for_each_possible_cpu(cpu)
689                 x += per_cpu(memcg->vmstats_percpu->state[idx], cpu);
690 #ifdef CONFIG_SMP
691         if (x < 0)
692                 x = 0;
693 #endif
694         return x;
695 }
696
697 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
698                               int val)
699 {
700         struct mem_cgroup_per_node *pn;
701         struct mem_cgroup *memcg;
702
703         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
704         memcg = pn->memcg;
705
706         /*
707          * Callers from rmap rely on disabled preemption because they never
708          * update their counters from interrupt context. For those counters we
709          * check that the update is never performed from an interrupt context,
710          * while other callers need to have interrupts disabled.
711          */
712         __memcg_stats_lock();
713         if (IS_ENABLED(CONFIG_DEBUG_VM) && !IS_ENABLED(CONFIG_PREEMPT_RT)) {
714                 switch (idx) {
715                 case NR_ANON_MAPPED:
716                 case NR_FILE_MAPPED:
717                 case NR_ANON_THPS:
718                 case NR_SHMEM_PMDMAPPED:
719                 case NR_FILE_PMDMAPPED:
720                         WARN_ON_ONCE(!in_task());
721                         break;
722                 default:
723                         WARN_ON_ONCE(!irqs_disabled());
724                 }
725         }
726
727         /* Update memcg */
728         __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
729
730         /* Update lruvec */
731         __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
732
733         memcg_rstat_updated(memcg, val);
734         memcg_stats_unlock();
735 }
736
737 /**
738  * __mod_lruvec_state - update lruvec memory statistics
739  * @lruvec: the lruvec
740  * @idx: the stat item
741  * @val: delta to add to the counter, can be negative
742  *
743  * The lruvec is the intersection of the NUMA node and a cgroup. This
744  * function updates the all three counters that are affected by a
745  * change of state at this level: per-node, per-cgroup, per-lruvec.
746  */
747 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
748                         int val)
749 {
750         /* Update node */
751         __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
752
753         /* Update memcg and lruvec */
754         if (!mem_cgroup_disabled())
755                 __mod_memcg_lruvec_state(lruvec, idx, val);
756 }
757
758 void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
759                              int val)
760 {
761         struct page *head = compound_head(page); /* rmap on tail pages */
762         struct mem_cgroup *memcg;
763         pg_data_t *pgdat = page_pgdat(page);
764         struct lruvec *lruvec;
765
766         rcu_read_lock();
767         memcg = page_memcg(head);
768         /* Untracked pages have no memcg, no lruvec. Update only the node */
769         if (!memcg) {
770                 rcu_read_unlock();
771                 __mod_node_page_state(pgdat, idx, val);
772                 return;
773         }
774
775         lruvec = mem_cgroup_lruvec(memcg, pgdat);
776         __mod_lruvec_state(lruvec, idx, val);
777         rcu_read_unlock();
778 }
779 EXPORT_SYMBOL(__mod_lruvec_page_state);
780
781 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
782 {
783         pg_data_t *pgdat = page_pgdat(virt_to_page(p));
784         struct mem_cgroup *memcg;
785         struct lruvec *lruvec;
786
787         rcu_read_lock();
788         memcg = mem_cgroup_from_obj(p);
789
790         /*
791          * Untracked pages have no memcg, no lruvec. Update only the
792          * node. If the slab objects have been reparented to the root memcg,
793          * then when a slab object is freed we must update the root memcg's
794          * per-memcg vmstats to keep them correct.
795          */
796         if (!memcg) {
797                 __mod_node_page_state(pgdat, idx, val);
798         } else {
799                 lruvec = mem_cgroup_lruvec(memcg, pgdat);
800                 __mod_lruvec_state(lruvec, idx, val);
801         }
802         rcu_read_unlock();
803 }
804
805 /**
806  * __count_memcg_events - account VM events in a cgroup
807  * @memcg: the memory cgroup
808  * @idx: the event item
809  * @count: the number of events that occurred
810  */
811 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
812                           unsigned long count)
813 {
814         if (mem_cgroup_disabled())
815                 return;
816
817         memcg_stats_lock();
818         __this_cpu_add(memcg->vmstats_percpu->events[idx], count);
819         memcg_rstat_updated(memcg, count);
820         memcg_stats_unlock();
821 }
822
823 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
824 {
825         return READ_ONCE(memcg->vmstats.events[event]);
826 }
827
828 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
829 {
830         long x = 0;
831         int cpu;
832
833         for_each_possible_cpu(cpu)
834                 x += per_cpu(memcg->vmstats_percpu->events[event], cpu);
835         return x;
836 }
837
838 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
839                                          int nr_pages)
840 {
841         /* a pagein of a large page is a single event, so ignore the page size */
842         if (nr_pages > 0)
843                 __count_memcg_events(memcg, PGPGIN, 1);
844         else {
845                 __count_memcg_events(memcg, PGPGOUT, 1);
846                 nr_pages = -nr_pages; /* for event */
847         }
848
849         __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
850 }
851
852 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
853                                        enum mem_cgroup_events_target target)
854 {
855         unsigned long val, next;
856
857         val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
858         next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
859         /* from time_after() in jiffies.h */
860         if ((long)(next - val) < 0) {
861                 switch (target) {
862                 case MEM_CGROUP_TARGET_THRESH:
863                         next = val + THRESHOLDS_EVENTS_TARGET;
864                         break;
865                 case MEM_CGROUP_TARGET_SOFTLIMIT:
866                         next = val + SOFTLIMIT_EVENTS_TARGET;
867                         break;
868                 default:
869                         break;
870                 }
871                 __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
872                 return true;
873         }
874         return false;
875 }
876
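/*
 * A note on the "(long)(next - val) < 0" test above: like time_after(), it
 * is wraparound-safe because the unsigned subtraction is taken modulo
 * 2^BITS_PER_LONG and then interpreted as signed. For example, if next has
 * wrapped around to 117 while val is still ULONG_MAX - 5, (long)(next - val)
 * evaluates to 123, so the target is correctly treated as not yet reached.
 */
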
877 /*
878  * Check events in order.
879  *
880  */
881 static void memcg_check_events(struct mem_cgroup *memcg, int nid)
882 {
883         if (IS_ENABLED(CONFIG_PREEMPT_RT))
884                 return;
885
886         /* threshold event is triggered in finer grain than soft limit */
887         if (unlikely(mem_cgroup_event_ratelimit(memcg,
888                                                 MEM_CGROUP_TARGET_THRESH))) {
889                 bool do_softlimit;
890
891                 do_softlimit = mem_cgroup_event_ratelimit(memcg,
892                                                 MEM_CGROUP_TARGET_SOFTLIMIT);
893                 mem_cgroup_threshold(memcg);
894                 if (unlikely(do_softlimit))
895                         mem_cgroup_update_tree(memcg, nid);
896         }
897 }
898
899 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
900 {
901         /*
902          * mm_update_next_owner() may clear mm->owner to NULL
903          * if it races with swapoff, page migration, etc.
904          * So this can be called with p == NULL.
905          */
906         if (unlikely(!p))
907                 return NULL;
908
909         return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
910 }
911 EXPORT_SYMBOL(mem_cgroup_from_task);
912
913 static __always_inline struct mem_cgroup *active_memcg(void)
914 {
915         if (!in_task())
916                 return this_cpu_read(int_active_memcg);
917         else
918                 return current->active_memcg;
919 }
920
921 /**
922  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
923  * @mm: mm from which memcg should be extracted. It can be NULL.
924  *
925  * Obtains a reference on mm's memcg and returns it if successful. If mm
926  * is NULL, then the memcg is chosen as follows:
927  * 1) The active memcg, if set.
928  * 2) current->mm->memcg, if available
929  * 3) root memcg
930  * If mem_cgroup is disabled, NULL is returned.
931  */
932 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
933 {
934         struct mem_cgroup *memcg;
935
936         if (mem_cgroup_disabled())
937                 return NULL;
938
939         /*
940          * Page cache insertions can happen without an
941          * actual mm context, e.g. during disk probing
942          * on boot, loopback IO, acct() writes etc.
943          *
944          * No need to css_get on root memcg as the reference
945          * counting is disabled on the root level in the
946          * cgroup core. See CSS_NO_REF.
947          */
948         if (unlikely(!mm)) {
949                 memcg = active_memcg();
950                 if (unlikely(memcg)) {
951                         /* remote memcg must hold a ref */
952                         css_get(&memcg->css);
953                         return memcg;
954                 }
955                 mm = current->mm;
956                 if (unlikely(!mm))
957                         return root_mem_cgroup;
958         }
959
960         rcu_read_lock();
961         do {
962                 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
963                 if (unlikely(!memcg))
964                         memcg = root_mem_cgroup;
965         } while (!css_tryget(&memcg->css));
966         rcu_read_unlock();
967         return memcg;
968 }
969 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
970
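/*
 * Illustrative sketch (not compiled): typical use of the helper above. The
 * returned memcg carries a css reference (a no-op for the root memcg, see
 * CSS_NO_REF), which the caller drops with css_put() when done.
 */
#if 0
	struct mem_cgroup *memcg;

	memcg = get_mem_cgroup_from_mm(current->mm);
	if (memcg) {
		/* ... charge against or inspect the memcg ... */
		css_put(&memcg->css);
	}
#endif
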
971 static __always_inline bool memcg_kmem_bypass(void)
972 {
973         /* Allow remote memcg charging from any context. */
974         if (unlikely(active_memcg()))
975                 return false;
976
977         /* Memcg to charge can't be determined. */
978         if (!in_task() || !current->mm || (current->flags & PF_KTHREAD))
979                 return true;
980
981         return false;
982 }
983
984 /**
985  * mem_cgroup_iter - iterate over memory cgroup hierarchy
986  * @root: hierarchy root
987  * @prev: previously returned memcg, NULL on first invocation
988  * @reclaim: cookie for shared reclaim walks, NULL for full walks
989  *
990  * Returns references to children of the hierarchy below @root, or
991  * @root itself, or %NULL after a full round-trip.
992  *
993  * Caller must pass the return value in @prev on subsequent
994  * invocations for reference counting, or use mem_cgroup_iter_break()
995  * to cancel a hierarchy walk before the round-trip is complete.
996  *
997  * Reclaimers can specify a node in @reclaim to divide up the memcgs
998  * in the hierarchy among all concurrent reclaimers operating on the
999  * same node.
1000  */
1001 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
1002                                    struct mem_cgroup *prev,
1003                                    struct mem_cgroup_reclaim_cookie *reclaim)
1004 {
1005         struct mem_cgroup_reclaim_iter *iter;
1006         struct cgroup_subsys_state *css = NULL;
1007         struct mem_cgroup *memcg = NULL;
1008         struct mem_cgroup *pos = NULL;
1009
1010         if (mem_cgroup_disabled())
1011                 return NULL;
1012
1013         if (!root)
1014                 root = root_mem_cgroup;
1015
1016         if (prev && !reclaim)
1017                 pos = prev;
1018
1019         rcu_read_lock();
1020
1021         if (reclaim) {
1022                 struct mem_cgroup_per_node *mz;
1023
1024                 mz = root->nodeinfo[reclaim->pgdat->node_id];
1025                 iter = &mz->iter;
1026
1027                 if (prev && reclaim->generation != iter->generation)
1028                         goto out_unlock;
1029
1030                 while (1) {
1031                         pos = READ_ONCE(iter->position);
1032                         if (!pos || css_tryget(&pos->css))
1033                                 break;
1034                         /*
1035                          * css reference reached zero, so iter->position will
1036                          * be cleared by ->css_released. However, we should not
1037                          * rely on this happening soon, because ->css_released
1038                          * is called from a work queue, and by busy-waiting we
1039                          * might block it. So we clear iter->position right
1040                          * away.
1041                          */
1042                         (void)cmpxchg(&iter->position, pos, NULL);
1043                 }
1044         }
1045
1046         if (pos)
1047                 css = &pos->css;
1048
1049         for (;;) {
1050                 css = css_next_descendant_pre(css, &root->css);
1051                 if (!css) {
1052                         /*
1053                          * Reclaimers share the hierarchy walk, and a
1054                          * new one might jump in right at the end of
1055                          * the hierarchy - make sure they see at least
1056                          * one group and restart from the beginning.
1057                          */
1058                         if (!prev)
1059                                 continue;
1060                         break;
1061                 }
1062
1063                 /*
1064                  * Verify the css and acquire a reference.  The root
1065                  * is provided by the caller, so we know it's alive
1066                  * and kicking, and don't take an extra reference.
1067                  */
1068                 memcg = mem_cgroup_from_css(css);
1069
1070                 if (css == &root->css)
1071                         break;
1072
1073                 if (css_tryget(css))
1074                         break;
1075
1076                 memcg = NULL;
1077         }
1078
1079         if (reclaim) {
1080                 /*
1081                  * The position could have already been updated by a competing
1082                  * thread, so check that the value hasn't changed since we read
1083                  * it to avoid reclaiming from the same cgroup twice.
1084                  */
1085                 (void)cmpxchg(&iter->position, pos, memcg);
1086
1087                 if (pos)
1088                         css_put(&pos->css);
1089
1090                 if (!memcg)
1091                         iter->generation++;
1092                 else if (!prev)
1093                         reclaim->generation = iter->generation;
1094         }
1095
1096 out_unlock:
1097         rcu_read_unlock();
1098         if (prev && prev != root)
1099                 css_put(&prev->css);
1100
1101         return memcg;
1102 }
1103
1104 /**
1105  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1106  * @root: hierarchy root
1107  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1108  */
1109 void mem_cgroup_iter_break(struct mem_cgroup *root,
1110                            struct mem_cgroup *prev)
1111 {
1112         if (!root)
1113                 root = root_mem_cgroup;
1114         if (prev && prev != root)
1115                 css_put(&prev->css);
1116 }
1117
1118 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1119                                         struct mem_cgroup *dead_memcg)
1120 {
1121         struct mem_cgroup_reclaim_iter *iter;
1122         struct mem_cgroup_per_node *mz;
1123         int nid;
1124
1125         for_each_node(nid) {
1126                 mz = from->nodeinfo[nid];
1127                 iter = &mz->iter;
1128                 cmpxchg(&iter->position, dead_memcg, NULL);
1129         }
1130 }
1131
1132 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1133 {
1134         struct mem_cgroup *memcg = dead_memcg;
1135         struct mem_cgroup *last;
1136
1137         do {
1138                 __invalidate_reclaim_iterators(memcg, dead_memcg);
1139                 last = memcg;
1140         } while ((memcg = parent_mem_cgroup(memcg)));
1141
1142         /*
1143          * When cgroup1 non-hierarchy mode is used,
1144          * parent_mem_cgroup() does not walk all the way up to the
1145          * cgroup root (root_mem_cgroup). So we have to handle
1146          * dead_memcg from cgroup root separately.
1147          */
1148         if (last != root_mem_cgroup)
1149                 __invalidate_reclaim_iterators(root_mem_cgroup,
1150                                                 dead_memcg);
1151 }
1152
1153 /**
1154  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1155  * @memcg: hierarchy root
1156  * @fn: function to call for each task
1157  * @arg: argument passed to @fn
1158  *
1159  * This function iterates over tasks attached to @memcg or to any of its
1160  * descendants and calls @fn for each task. If @fn returns a non-zero
1161  * value, the function breaks the iteration loop and returns the value.
1162  * Otherwise, it will iterate over all tasks and return 0.
1163  *
1164  * This function must not be called for the root memory cgroup.
1165  */
1166 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1167                           int (*fn)(struct task_struct *, void *), void *arg)
1168 {
1169         struct mem_cgroup *iter;
1170         int ret = 0;
1171
1172         BUG_ON(memcg == root_mem_cgroup);
1173
1174         for_each_mem_cgroup_tree(iter, memcg) {
1175                 struct css_task_iter it;
1176                 struct task_struct *task;
1177
1178                 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1179                 while (!ret && (task = css_task_iter_next(&it)))
1180                         ret = fn(task, arg);
1181                 css_task_iter_end(&it);
1182                 if (ret) {
1183                         mem_cgroup_iter_break(memcg, iter);
1184                         break;
1185                 }
1186         }
1187         return ret;
1188 }
1189
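/*
 * Illustrative sketch (not compiled): the shape of a callback passed as @fn
 * to mem_cgroup_scan_tasks() above. A non-zero return value stops the walk
 * and is propagated to the caller; count_task() is a placeholder.
 */
#if 0
static int count_task(struct task_struct *task, void *arg)
{
	unsigned int *nr = arg;

	(*nr)++;
	return 0;	/* 0 == keep iterating, non-zero aborts the walk */
}

	/* caller: mem_cgroup_scan_tasks(memcg, count_task, &nr_tasks); */
#endif
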
1190 #ifdef CONFIG_DEBUG_VM
1191 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1192 {
1193         struct mem_cgroup *memcg;
1194
1195         if (mem_cgroup_disabled())
1196                 return;
1197
1198         memcg = folio_memcg(folio);
1199
1200         if (!memcg)
1201                 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != root_mem_cgroup, folio);
1202         else
1203                 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1204 }
1205 #endif
1206
1207 /**
1208  * folio_lruvec_lock - Lock the lruvec for a folio.
1209  * @folio: Pointer to the folio.
1210  *
1211  * These functions are safe to use under any of the following conditions:
1212  * - folio locked
1213  * - folio_test_lru false
1214  * - folio_memcg_lock()
1215  * - folio frozen (refcount of 0)
1216  *
1217  * Return: The lruvec this folio is on with its lock held.
1218  */
1219 struct lruvec *folio_lruvec_lock(struct folio *folio)
1220 {
1221         struct lruvec *lruvec = folio_lruvec(folio);
1222
1223         spin_lock(&lruvec->lru_lock);
1224         lruvec_memcg_debug(lruvec, folio);
1225
1226         return lruvec;
1227 }
1228
1229 /**
1230  * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1231  * @folio: Pointer to the folio.
1232  *
1233  * These functions are safe to use under any of the following conditions:
1234  * - folio locked
1235  * - folio_test_lru false
1236  * - folio_memcg_lock()
1237  * - folio frozen (refcount of 0)
1238  *
1239  * Return: The lruvec this folio is on with its lock held and interrupts
1240  * disabled.
1241  */
1242 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1243 {
1244         struct lruvec *lruvec = folio_lruvec(folio);
1245
1246         spin_lock_irq(&lruvec->lru_lock);
1247         lruvec_memcg_debug(lruvec, folio);
1248
1249         return lruvec;
1250 }
1251
1252 /**
1253  * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1254  * @folio: Pointer to the folio.
1255  * @flags: Pointer to irqsave flags.
1256  *
1257  * These functions are safe to use under any of the following conditions:
1258  * - folio locked
1259  * - folio_test_lru false
1260  * - folio_memcg_lock()
1261  * - folio frozen (refcount of 0)
1262  *
1263  * Return: The lruvec this folio is on with its lock held and interrupts
1264  * disabled.
1265  */
1266 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1267                 unsigned long *flags)
1268 {
1269         struct lruvec *lruvec = folio_lruvec(folio);
1270
1271         spin_lock_irqsave(&lruvec->lru_lock, *flags);
1272         lruvec_memcg_debug(lruvec, folio);
1273
1274         return lruvec;
1275 }
1276
1277 /**
1278  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1279  * @lruvec: mem_cgroup per zone lru vector
1280  * @lru: index of lru list the page is sitting on
1281  * @zid: zone id of the accounted pages
1282  * @nr_pages: positive when adding or negative when removing
1283  *
1284  * This function must be called under lru_lock, just before a page is added
1285  * to or just after a page is removed from an lru list.
1286  */
1287 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1288                                 int zid, int nr_pages)
1289 {
1290         struct mem_cgroup_per_node *mz;
1291         unsigned long *lru_size;
1292         long size;
1293
1294         if (mem_cgroup_disabled())
1295                 return;
1296
1297         mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1298         lru_size = &mz->lru_zone_size[zid][lru];
1299
1300         if (nr_pages < 0)
1301                 *lru_size += nr_pages;
1302
1303         size = *lru_size;
1304         if (WARN_ONCE(size < 0,
1305                 "%s(%p, %d, %d): lru_size %ld\n",
1306                 __func__, lruvec, lru, nr_pages, size)) {
1307                 VM_BUG_ON(1);
1308                 *lru_size = 0;
1309         }
1310
1311         if (nr_pages > 0)
1312                 *lru_size += nr_pages;
1313 }
1314
1315 /**
1316  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1317  * @memcg: the memory cgroup
1318  *
1319  * Returns the maximum amount of memory @memcg can be charged with, in
1320  * pages.
1321  */
1322 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1323 {
1324         unsigned long margin = 0;
1325         unsigned long count;
1326         unsigned long limit;
1327
1328         count = page_counter_read(&memcg->memory);
1329         limit = READ_ONCE(memcg->memory.max);
1330         if (count < limit)
1331                 margin = limit - count;
1332
1333         if (do_memsw_account()) {
1334                 count = page_counter_read(&memcg->memsw);
1335                 limit = READ_ONCE(memcg->memsw.max);
1336                 if (count < limit)
1337                         margin = min(margin, limit - count);
1338                 else
1339                         margin = 0;
1340         }
1341
1342         return margin;
1343 }
1344
1345 /*
1346  * A routine for checking whether "mem" is under move_account() or not.
1347  *
1348  * Checks whether a cgroup is mc.from, mc.to, or a descendant of the
1349  * moving cgroups. This is used for waiting at high memory pressure
1350  * caused by "move".
1351  */
1352 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1353 {
1354         struct mem_cgroup *from;
1355         struct mem_cgroup *to;
1356         bool ret = false;
1357         /*
1358          * Unlike the task_move routines, we access mc.to and mc.from not under
1359          * mutual exclusion by cgroup_mutex; here we take the spinlock instead.
1360          */
1361         spin_lock(&mc.lock);
1362         from = mc.from;
1363         to = mc.to;
1364         if (!from)
1365                 goto unlock;
1366
1367         ret = mem_cgroup_is_descendant(from, memcg) ||
1368                 mem_cgroup_is_descendant(to, memcg);
1369 unlock:
1370         spin_unlock(&mc.lock);
1371         return ret;
1372 }
1373
1374 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1375 {
1376         if (mc.moving_task && current != mc.moving_task) {
1377                 if (mem_cgroup_under_move(memcg)) {
1378                         DEFINE_WAIT(wait);
1379                         prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1380                         /* moving charge context might have finished. */
1381                         if (mc.moving_task)
1382                                 schedule();
1383                         finish_wait(&mc.waitq, &wait);
1384                         return true;
1385                 }
1386         }
1387         return false;
1388 }
1389
1390 struct memory_stat {
1391         const char *name;
1392         unsigned int idx;
1393 };
1394
1395 static const struct memory_stat memory_stats[] = {
1396         { "anon",                       NR_ANON_MAPPED                  },
1397         { "file",                       NR_FILE_PAGES                   },
1398         { "kernel",                     MEMCG_KMEM                      },
1399         { "kernel_stack",               NR_KERNEL_STACK_KB              },
1400         { "pagetables",                 NR_PAGETABLE                    },
1401         { "percpu",                     MEMCG_PERCPU_B                  },
1402         { "sock",                       MEMCG_SOCK                      },
1403         { "vmalloc",                    MEMCG_VMALLOC                   },
1404         { "shmem",                      NR_SHMEM                        },
1405         { "file_mapped",                NR_FILE_MAPPED                  },
1406         { "file_dirty",                 NR_FILE_DIRTY                   },
1407         { "file_writeback",             NR_WRITEBACK                    },
1408 #ifdef CONFIG_SWAP
1409         { "swapcached",                 NR_SWAPCACHE                    },
1410 #endif
1411 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1412         { "anon_thp",                   NR_ANON_THPS                    },
1413         { "file_thp",                   NR_FILE_THPS                    },
1414         { "shmem_thp",                  NR_SHMEM_THPS                   },
1415 #endif
1416         { "inactive_anon",              NR_INACTIVE_ANON                },
1417         { "active_anon",                NR_ACTIVE_ANON                  },
1418         { "inactive_file",              NR_INACTIVE_FILE                },
1419         { "active_file",                NR_ACTIVE_FILE                  },
1420         { "unevictable",                NR_UNEVICTABLE                  },
1421         { "slab_reclaimable",           NR_SLAB_RECLAIMABLE_B           },
1422         { "slab_unreclaimable",         NR_SLAB_UNRECLAIMABLE_B         },
1423
1424         /* The memory events */
1425         { "workingset_refault_anon",    WORKINGSET_REFAULT_ANON         },
1426         { "workingset_refault_file",    WORKINGSET_REFAULT_FILE         },
1427         { "workingset_activate_anon",   WORKINGSET_ACTIVATE_ANON        },
1428         { "workingset_activate_file",   WORKINGSET_ACTIVATE_FILE        },
1429         { "workingset_restore_anon",    WORKINGSET_RESTORE_ANON         },
1430         { "workingset_restore_file",    WORKINGSET_RESTORE_FILE         },
1431         { "workingset_nodereclaim",     WORKINGSET_NODERECLAIM          },
1432 };
1433
1434 /* Translate stat items to the correct unit for memory.stat output */
1435 static int memcg_page_state_unit(int item)
1436 {
1437         switch (item) {
1438         case MEMCG_PERCPU_B:
1439         case NR_SLAB_RECLAIMABLE_B:
1440         case NR_SLAB_UNRECLAIMABLE_B:
1441         case WORKINGSET_REFAULT_ANON:
1442         case WORKINGSET_REFAULT_FILE:
1443         case WORKINGSET_ACTIVATE_ANON:
1444         case WORKINGSET_ACTIVATE_FILE:
1445         case WORKINGSET_RESTORE_ANON:
1446         case WORKINGSET_RESTORE_FILE:
1447         case WORKINGSET_NODERECLAIM:
1448                 return 1;
1449         case NR_KERNEL_STACK_KB:
1450                 return SZ_1K;
1451         default:
1452                 return PAGE_SIZE;
1453         }
1454 }
1455
1456 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1457                                                     int item)
1458 {
1459         return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
1460 }
1461
1462 static char *memory_stat_format(struct mem_cgroup *memcg)
1463 {
1464         struct seq_buf s;
1465         int i;
1466
1467         seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1468         if (!s.buffer)
1469                 return NULL;
1470
1471         /*
1472          * Provide statistics on the state of the memory subsystem as
1473          * well as cumulative event counters that show past behavior.
1474          *
1475          * This list is ordered following a combination of these gradients:
1476          * 1) generic big picture -> specifics and details
1477          * 2) reflecting userspace activity -> reflecting kernel heuristics
1478          *
1479          * Current memory state:
1480          */
1481         mem_cgroup_flush_stats();
1482
1483         for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1484                 u64 size;
1485
1486                 size = memcg_page_state_output(memcg, memory_stats[i].idx);
1487                 seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
1488
1489                 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1490                         size += memcg_page_state_output(memcg,
1491                                                         NR_SLAB_RECLAIMABLE_B);
1492                         seq_buf_printf(&s, "slab %llu\n", size);
1493                 }
1494         }
1495
1496         /* Accumulated memory events */
1497
1498         seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
1499                        memcg_events(memcg, PGFAULT));
1500         seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
1501                        memcg_events(memcg, PGMAJFAULT));
1502         seq_buf_printf(&s, "%s %lu\n",  vm_event_name(PGREFILL),
1503                        memcg_events(memcg, PGREFILL));
1504         seq_buf_printf(&s, "pgscan %lu\n",
1505                        memcg_events(memcg, PGSCAN_KSWAPD) +
1506                        memcg_events(memcg, PGSCAN_DIRECT));
1507         seq_buf_printf(&s, "pgsteal %lu\n",
1508                        memcg_events(memcg, PGSTEAL_KSWAPD) +
1509                        memcg_events(memcg, PGSTEAL_DIRECT));
1510         seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1511                        memcg_events(memcg, PGACTIVATE));
1512         seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1513                        memcg_events(memcg, PGDEACTIVATE));
1514         seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1515                        memcg_events(memcg, PGLAZYFREE));
1516         seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1517                        memcg_events(memcg, PGLAZYFREED));
1518
1519 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1520         seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
1521                        memcg_events(memcg, THP_FAULT_ALLOC));
1522         seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
1523                        memcg_events(memcg, THP_COLLAPSE_ALLOC));
1524 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1525
1526         /* The above should easily fit into one page */
1527         WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1528
1529         return s.buffer;
1530 }
1531
1532 #define K(x) ((x) << (PAGE_SHIFT-10))
1533 /**
1534  * mem_cgroup_print_oom_context: Print OOM information relevant to
1535  * memory controller.
1536  * @memcg: The memory cgroup that went over limit
1537  * @p: Task that is going to be killed
1538  *
1539  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1540  * enabled
1541  */
1542 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1543 {
1544         rcu_read_lock();
1545
1546         if (memcg) {
1547                 pr_cont(",oom_memcg=");
1548                 pr_cont_cgroup_path(memcg->css.cgroup);
1549         } else
1550                 pr_cont(",global_oom");
1551         if (p) {
1552                 pr_cont(",task_memcg=");
1553                 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1554         }
1555         rcu_read_unlock();
1556 }
1557
1558 /**
1559  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1560  * the memory controller.
1561  * @memcg: The memory cgroup that went over limit
1562  */
1563 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1564 {
1565         char *buf;
1566
1567         pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1568                 K((u64)page_counter_read(&memcg->memory)),
1569                 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1570         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1571                 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1572                         K((u64)page_counter_read(&memcg->swap)),
1573                         K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1574         else {
1575                 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1576                         K((u64)page_counter_read(&memcg->memsw)),
1577                         K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1578                 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1579                         K((u64)page_counter_read(&memcg->kmem)),
1580                         K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1581         }
1582
1583         pr_info("Memory cgroup stats for ");
1584         pr_cont_cgroup_path(memcg->css.cgroup);
1585         pr_cont(":");
1586         buf = memory_stat_format(memcg);
1587         if (!buf)
1588                 return;
1589         pr_info("%s", buf);
1590         kfree(buf);
1591 }
1592
1593 /*
1594  * Return the memory (and swap, if configured) limit for a memcg.
1595  */
1596 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1597 {
1598         unsigned long max = READ_ONCE(memcg->memory.max);
1599
1600         if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
1601                 if (mem_cgroup_swappiness(memcg))
1602                         max += min(READ_ONCE(memcg->swap.max),
1603                                    (unsigned long)total_swap_pages);
1604         } else { /* v1 */
1605                 if (mem_cgroup_swappiness(memcg)) {
1606                         /* Calculate swap excess capacity from memsw limit */
1607                         unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1608
1609                         max += min(swap, (unsigned long)total_swap_pages);
1610                 }
1611         }
1612         return max;
1613 }
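
/*
 * Worked example for mem_cgroup_get_max() on cgroup v2 (illustrative
 * numbers only, assuming 4K pages): with memory.max = 1 GiB (262144
 * pages), swap.max = 512 MiB (131072 pages), non-zero swappiness and
 * ample physical swap, the effective ceiling is
 *
 *	max = 262144 + min(131072, total_swap_pages) = 393216 pages
 *
 * i.e. 1.5 GiB.  With swappiness == 0 the swap term is dropped and the
 * result is just memory.max.
 */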
1614
1615 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1616 {
1617         return page_counter_read(&memcg->memory);
1618 }
1619
1620 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1621                                      int order)
1622 {
1623         struct oom_control oc = {
1624                 .zonelist = NULL,
1625                 .nodemask = NULL,
1626                 .memcg = memcg,
1627                 .gfp_mask = gfp_mask,
1628                 .order = order,
1629         };
1630         bool ret = true;
1631
1632         if (mutex_lock_killable(&oom_lock))
1633                 return true;
1634
1635         if (mem_cgroup_margin(memcg) >= (1 << order))
1636                 goto unlock;
1637
1638         /*
1639          * A few threads which were not waiting at mutex_lock_killable() can
1640          * fail to bail out. Therefore, check again after holding oom_lock.
1641          */
1642         ret = task_is_dying() || out_of_memory(&oc);
1643
1644 unlock:
1645         mutex_unlock(&oom_lock);
1646         return ret;
1647 }
1648
1649 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1650                                    pg_data_t *pgdat,
1651                                    gfp_t gfp_mask,
1652                                    unsigned long *total_scanned)
1653 {
1654         struct mem_cgroup *victim = NULL;
1655         int total = 0;
1656         int loop = 0;
1657         unsigned long excess;
1658         unsigned long nr_scanned;
1659         struct mem_cgroup_reclaim_cookie reclaim = {
1660                 .pgdat = pgdat,
1661         };
1662
1663         excess = soft_limit_excess(root_memcg);
1664
1665         while (1) {
1666                 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1667                 if (!victim) {
1668                         loop++;
1669                         if (loop >= 2) {
1670                                 /*
1671                                  * If we have not been able to reclaim
1672                                  * anything, it might be because there are
1673                                  * no reclaimable pages under this hierarchy
1674                                  */
1675                                 if (!total)
1676                                         break;
1677                                 /*
1678                                  * We want to do more targeted reclaim.
1679                                  * excess >> 2 is not too excessive, so we don't
1680                                  * reclaim too much, nor so little that we keep
1681                                  * coming back to reclaim from this cgroup
1682                                  */
1683                                 if (total >= (excess >> 2) ||
1684                                         (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1685                                         break;
1686                         }
1687                         continue;
1688                 }
1689                 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1690                                         pgdat, &nr_scanned);
1691                 *total_scanned += nr_scanned;
1692                 if (!soft_limit_excess(root_memcg))
1693                         break;
1694         }
1695         mem_cgroup_iter_break(root_memcg, victim);
1696         return total;
1697 }
1698
1699 #ifdef CONFIG_LOCKDEP
1700 static struct lockdep_map memcg_oom_lock_dep_map = {
1701         .name = "memcg_oom_lock",
1702 };
1703 #endif
1704
1705 static DEFINE_SPINLOCK(memcg_oom_lock);
1706
1707 /*
1708  * Check whether the OOM killer is already running under our hierarchy.
1709  * If someone else is already running it, return false.
1710  */
1711 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1712 {
1713         struct mem_cgroup *iter, *failed = NULL;
1714
1715         spin_lock(&memcg_oom_lock);
1716
1717         for_each_mem_cgroup_tree(iter, memcg) {
1718                 if (iter->oom_lock) {
1719                         /*
1720                          * This subtree of our hierarchy is already locked,
1721                          * so we cannot take the lock.
1722                          */
1723                         failed = iter;
1724                         mem_cgroup_iter_break(memcg, iter);
1725                         break;
1726                 } else
1727                         iter->oom_lock = true;
1728         }
1729
1730         if (failed) {
1731                 /*
1732                  * OK, we failed to lock the whole subtree, so we have
1733                  * to clean up what we already set up, up to the failing memcg.
1734                  */
1735                 for_each_mem_cgroup_tree(iter, memcg) {
1736                         if (iter == failed) {
1737                                 mem_cgroup_iter_break(memcg, iter);
1738                                 break;
1739                         }
1740                         iter->oom_lock = false;
1741                 }
1742         } else
1743                 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1744
1745         spin_unlock(&memcg_oom_lock);
1746
1747         return !failed;
1748 }
1749
1750 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1751 {
1752         struct mem_cgroup *iter;
1753
1754         spin_lock(&memcg_oom_lock);
1755         mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1756         for_each_mem_cgroup_tree(iter, memcg)
1757                 iter->oom_lock = false;
1758         spin_unlock(&memcg_oom_lock);
1759 }
1760
1761 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1762 {
1763         struct mem_cgroup *iter;
1764
1765         spin_lock(&memcg_oom_lock);
1766         for_each_mem_cgroup_tree(iter, memcg)
1767                 iter->under_oom++;
1768         spin_unlock(&memcg_oom_lock);
1769 }
1770
1771 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1772 {
1773         struct mem_cgroup *iter;
1774
1775         /*
1776          * Be careful about under_oom underflows because a child memcg
1777          * could have been added after mem_cgroup_mark_under_oom.
1778          */
1779         spin_lock(&memcg_oom_lock);
1780         for_each_mem_cgroup_tree(iter, memcg)
1781                 if (iter->under_oom > 0)
1782                         iter->under_oom--;
1783         spin_unlock(&memcg_oom_lock);
1784 }
1785
1786 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1787
1788 struct oom_wait_info {
1789         struct mem_cgroup *memcg;
1790         wait_queue_entry_t      wait;
1791 };
1792
1793 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1794         unsigned mode, int sync, void *arg)
1795 {
1796         struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1797         struct mem_cgroup *oom_wait_memcg;
1798         struct oom_wait_info *oom_wait_info;
1799
1800         oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1801         oom_wait_memcg = oom_wait_info->memcg;
1802
1803         if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1804             !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1805                 return 0;
1806         return autoremove_wake_function(wait, mode, sync, arg);
1807 }
1808
1809 static void memcg_oom_recover(struct mem_cgroup *memcg)
1810 {
1811         /*
1812          * For the following lockless ->under_oom test, the only required
1813          * guarantee is that it must see the state asserted by an OOM when
1814          * this function is called as a result of userland actions
1815          * triggered by the notification of the OOM.  This is trivially
1816          * achieved by invoking mem_cgroup_mark_under_oom() before
1817          * triggering notification.
1818          */
1819         if (memcg && memcg->under_oom)
1820                 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1821 }
1822
1823 /*
1824  * Returns true if one or more processes were successfully killed. Though in
1825  * some corner cases it can return true even without killing any process.
1826  */
1827 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1828 {
1829         bool locked, ret;
1830
1831         if (order > PAGE_ALLOC_COSTLY_ORDER)
1832                 return false;
1833
1834         memcg_memory_event(memcg, MEMCG_OOM);
1835
1836         /*
1837          * We are in the middle of the charge context here, so we
1838          * don't want to block when potentially sitting on a callstack
1839          * that holds all kinds of filesystem and mm locks.
1840          *
1841          * cgroup1 allows disabling the OOM killer and waiting for outside
1842          * handling until the charge can succeed; remember the context and put
1843          * the task to sleep at the end of the page fault when all locks are
1844          * released.
1845          *
1846          * On the other hand, in-kernel OOM killer allows for an async victim
1847          * memory reclaim (oom_reaper) and that means that we are not solely
1848          * relying on the oom victim to make a forward progress and we can
1849          * invoke the oom killer here.
1850          *
1851          * Please note that mem_cgroup_out_of_memory might fail to find a
1852          * victim and then we have to bail out from the charge path.
1853          */
1854         if (memcg->oom_kill_disable) {
1855                 if (current->in_user_fault) {
1856                         css_get(&memcg->css);
1857                         current->memcg_in_oom = memcg;
1858                         current->memcg_oom_gfp_mask = mask;
1859                         current->memcg_oom_order = order;
1860                 }
1861                 return false;
1862         }
1863
1864         mem_cgroup_mark_under_oom(memcg);
1865
1866         locked = mem_cgroup_oom_trylock(memcg);
1867
1868         if (locked)
1869                 mem_cgroup_oom_notify(memcg);
1870
1871         mem_cgroup_unmark_under_oom(memcg);
1872         ret = mem_cgroup_out_of_memory(memcg, mask, order);
1873
1874         if (locked)
1875                 mem_cgroup_oom_unlock(memcg);
1876
1877         return ret;
1878 }
1879
1880 /**
1881  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1882  * @handle: actually kill/wait or just clean up the OOM state
1883  *
1884  * This has to be called at the end of a page fault if the memcg OOM
1885  * handler was enabled.
1886  *
1887  * Memcg supports userspace OOM handling where failed allocations must
1888  * sleep on a waitqueue until the userspace task resolves the
1889  * situation.  Sleeping directly in the charge context with all kinds
1890  * of locks held is not a good idea, instead we remember an OOM state
1891  * in the task and mem_cgroup_oom_synchronize() has to be called at
1892  * the end of the page fault to complete the OOM handling.
1893  *
1894  * Returns %true if an ongoing memcg OOM situation was detected and
1895  * completed, %false otherwise.
1896  */
1897 bool mem_cgroup_oom_synchronize(bool handle)
1898 {
1899         struct mem_cgroup *memcg = current->memcg_in_oom;
1900         struct oom_wait_info owait;
1901         bool locked;
1902
1903         /* OOM is global, do not handle */
1904         if (!memcg)
1905                 return false;
1906
1907         if (!handle)
1908                 goto cleanup;
1909
1910         owait.memcg = memcg;
1911         owait.wait.flags = 0;
1912         owait.wait.func = memcg_oom_wake_function;
1913         owait.wait.private = current;
1914         INIT_LIST_HEAD(&owait.wait.entry);
1915
1916         prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1917         mem_cgroup_mark_under_oom(memcg);
1918
1919         locked = mem_cgroup_oom_trylock(memcg);
1920
1921         if (locked)
1922                 mem_cgroup_oom_notify(memcg);
1923
1924         if (locked && !memcg->oom_kill_disable) {
1925                 mem_cgroup_unmark_under_oom(memcg);
1926                 finish_wait(&memcg_oom_waitq, &owait.wait);
1927                 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1928                                          current->memcg_oom_order);
1929         } else {
1930                 schedule();
1931                 mem_cgroup_unmark_under_oom(memcg);
1932                 finish_wait(&memcg_oom_waitq, &owait.wait);
1933         }
1934
1935         if (locked) {
1936                 mem_cgroup_oom_unlock(memcg);
1937                 /*
1938                  * There is no guarantee that an OOM-lock contender
1939                  * sees the wakeups triggered by the OOM kill
1940                  * uncharges.  Wake any sleepers explicitly.
1941                  */
1942                 memcg_oom_recover(memcg);
1943         }
1944 cleanup:
1945         current->memcg_in_oom = NULL;
1946         css_put(&memcg->css);
1947         return true;
1948 }
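
/*
 * For illustration, a caller of mem_cgroup_oom_synchronize() looks
 * roughly like the sketch below (modelled on pagefault_out_of_memory();
 * see mm/oom_kill.c for the authoritative version):
 *
 *	void pagefault_out_of_memory(void)
 *	{
 *		// Resolve a userspace-handled memcg OOM recorded during
 *		// the fault before falling back to the global OOM killer.
 *		if (mem_cgroup_oom_synchronize(true))
 *			return;
 *		...
 *	}
 */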
1949
1950 /**
1951  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1952  * @victim: task to be killed by the OOM killer
1953  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1954  *
1955  * Returns a pointer to a memory cgroup, which has to be cleaned up
1956  * by killing all OOM-killable tasks belonging to it.
1957  *
1958  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1959  */
1960 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1961                                             struct mem_cgroup *oom_domain)
1962 {
1963         struct mem_cgroup *oom_group = NULL;
1964         struct mem_cgroup *memcg;
1965
1966         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1967                 return NULL;
1968
1969         if (!oom_domain)
1970                 oom_domain = root_mem_cgroup;
1971
1972         rcu_read_lock();
1973
1974         memcg = mem_cgroup_from_task(victim);
1975         if (memcg == root_mem_cgroup)
1976                 goto out;
1977
1978         /*
1979          * If the victim task has been asynchronously moved to a different
1980          * memory cgroup, we might end up killing tasks outside oom_domain.
1981          * In this case it's better to ignore memory.group.oom.
1982          */
1983         if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1984                 goto out;
1985
1986         /*
1987          * Traverse the memory cgroup hierarchy from the victim task's
1988          * cgroup up to the OOMing cgroup (or root) to find the
1989          * highest-level memory cgroup with oom.group set.
1990          */
1991         for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1992                 if (memcg->oom_group)
1993                         oom_group = memcg;
1994
1995                 if (memcg == oom_domain)
1996                         break;
1997         }
1998
1999         if (oom_group)
2000                 css_get(&oom_group->css);
2001 out:
2002         rcu_read_unlock();
2003
2004         return oom_group;
2005 }
2006
2007 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
2008 {
2009         pr_info("Tasks in ");
2010         pr_cont_cgroup_path(memcg->css.cgroup);
2011         pr_cont(" are going to be killed due to memory.oom.group set\n");
2012 }
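
/*
 * A minimal sketch of how the OOM killer is expected to consume the
 * oom.group API above (modelled on oom_kill_process() in mm/oom_kill.c;
 * simplified, names as in that file):
 *
 *	struct mem_cgroup *oom_group;
 *
 *	oom_group = mem_cgroup_get_oom_group(victim, oc->memcg);
 *	...kill the selected victim...
 *	if (oom_group) {
 *		mem_cgroup_print_oom_group(oom_group);
 *		mem_cgroup_scan_tasks(oom_group, oom_kill_memcg_member,
 *				      (void *)message);
 *		mem_cgroup_put(oom_group);
 *	}
 */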
2013
2014 /**
2015  * folio_memcg_lock - Bind a folio to its memcg.
2016  * @folio: The folio.
2017  *
2018  * This function prevents unlocked LRU folios from being moved to
2019  * another cgroup.
2020  *
2021  * It ensures lifetime of the bound memcg.  The caller is responsible
2022  * for the lifetime of the folio.
2023  */
2024 void folio_memcg_lock(struct folio *folio)
2025 {
2026         struct mem_cgroup *memcg;
2027         unsigned long flags;
2028
2029         /*
2030          * The RCU lock is held throughout the transaction.  The fast
2031          * path can get away without acquiring the memcg->move_lock
2032          * because page moving starts with an RCU grace period.
2033          */
2034         rcu_read_lock();
2035
2036         if (mem_cgroup_disabled())
2037                 return;
2038 again:
2039         memcg = folio_memcg(folio);
2040         if (unlikely(!memcg))
2041                 return;
2042
2043 #ifdef CONFIG_PROVE_LOCKING
2044         local_irq_save(flags);
2045         might_lock(&memcg->move_lock);
2046         local_irq_restore(flags);
2047 #endif
2048
2049         if (atomic_read(&memcg->moving_account) <= 0)
2050                 return;
2051
2052         spin_lock_irqsave(&memcg->move_lock, flags);
2053         if (memcg != folio_memcg(folio)) {
2054                 spin_unlock_irqrestore(&memcg->move_lock, flags);
2055                 goto again;
2056         }
2057
2058         /*
2059          * When charge migration first begins, we can have multiple
2060          * critical sections holding the fast-path RCU lock and one
2061          * holding the slowpath move_lock. Track the task that holds the
2062          * move_lock for unlock_page_memcg().
2063          */
2064         memcg->move_lock_task = current;
2065         memcg->move_lock_flags = flags;
2066 }
2067
2068 void lock_page_memcg(struct page *page)
2069 {
2070         folio_memcg_lock(page_folio(page));
2071 }
2072
2073 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2074 {
2075         if (memcg && memcg->move_lock_task == current) {
2076                 unsigned long flags = memcg->move_lock_flags;
2077
2078                 memcg->move_lock_task = NULL;
2079                 memcg->move_lock_flags = 0;
2080
2081                 spin_unlock_irqrestore(&memcg->move_lock, flags);
2082         }
2083
2084         rcu_read_unlock();
2085 }
2086
2087 /**
2088  * folio_memcg_unlock - Release the binding between a folio and its memcg.
2089  * @folio: The folio.
2090  *
2091  * This releases the binding created by folio_memcg_lock().  This does
2092  * not change the accounting of this folio to its memcg, but it does
2093  * permit others to change it.
2094  */
2095 void folio_memcg_unlock(struct folio *folio)
2096 {
2097         __folio_memcg_unlock(folio_memcg(folio));
2098 }
2099
2100 void unlock_page_memcg(struct page *page)
2101 {
2102         folio_memcg_unlock(page_folio(page));
2103 }
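
/*
 * Typical usage of the binding above is a short critical section in
 * which the folio's memcg association must not change underneath the
 * caller, e.g. (illustrative sketch, not a specific call site):
 *
 *	folio_memcg_lock(folio);
 *	// The folio cannot migrate to another memcg here, so the
 *	// binding read below stays valid for the whole section.
 *	memcg = folio_memcg(folio);
 *	if (memcg)
 *		...update state that must match this memcg...
 *	folio_memcg_unlock(folio);
 */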
2104
2105 struct memcg_stock_pcp {
2106         local_lock_t stock_lock;
2107         struct mem_cgroup *cached; /* this is never the root cgroup */
2108         unsigned int nr_pages;
2109
2110 #ifdef CONFIG_MEMCG_KMEM
2111         struct obj_cgroup *cached_objcg;
2112         struct pglist_data *cached_pgdat;
2113         unsigned int nr_bytes;
2114         int nr_slab_reclaimable_b;
2115         int nr_slab_unreclaimable_b;
2116 #endif
2117
2118         struct work_struct work;
2119         unsigned long flags;
2120 #define FLUSHING_CACHED_CHARGE  0
2121 };
2122 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2123         .stock_lock = INIT_LOCAL_LOCK(stock_lock),
2124 };
2125 static DEFINE_MUTEX(percpu_charge_mutex);
2126
2127 #ifdef CONFIG_MEMCG_KMEM
2128 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2129 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2130                                      struct mem_cgroup *root_memcg);
2131 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2132
2133 #else
2134 static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2135 {
2136         return NULL;
2137 }
2138 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2139                                      struct mem_cgroup *root_memcg)
2140 {
2141         return false;
2142 }
2143 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2144 {
2145 }
2146 #endif
2147
2148 /**
2149  * consume_stock: Try to consume stocked charge on this cpu.
2150  * @memcg: memcg to consume from.
2151  * @nr_pages: how many pages to charge.
2152  *
2153  * The charges will only happen if @memcg matches the current cpu's memcg
2154  * stock, and at least @nr_pages are available in that stock.  Failure to
2155  * service an allocation will refill the stock.
2156  *
2157  * returns true if successful, false otherwise.
2158  */
2159 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2160 {
2161         struct memcg_stock_pcp *stock;
2162         unsigned long flags;
2163         bool ret = false;
2164
2165         if (nr_pages > MEMCG_CHARGE_BATCH)
2166                 return ret;
2167
2168         local_lock_irqsave(&memcg_stock.stock_lock, flags);
2169
2170         stock = this_cpu_ptr(&memcg_stock);
2171         if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2172                 stock->nr_pages -= nr_pages;
2173                 ret = true;
2174         }
2175
2176         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2177
2178         return ret;
2179 }
2180
2181 /*
2182  * Return the per-cpu cached stock to the page counters and reset the cached information.
2183  */
2184 static void drain_stock(struct memcg_stock_pcp *stock)
2185 {
2186         struct mem_cgroup *old = stock->cached;
2187
2188         if (!old)
2189                 return;
2190
2191         if (stock->nr_pages) {
2192                 page_counter_uncharge(&old->memory, stock->nr_pages);
2193                 if (do_memsw_account())
2194                         page_counter_uncharge(&old->memsw, stock->nr_pages);
2195                 stock->nr_pages = 0;
2196         }
2197
2198         css_put(&old->css);
2199         stock->cached = NULL;
2200 }
2201
2202 static void drain_local_stock(struct work_struct *dummy)
2203 {
2204         struct memcg_stock_pcp *stock;
2205         struct obj_cgroup *old = NULL;
2206         unsigned long flags;
2207
2208         /*
2209          * The only protection from CPU hotplug (memcg_hotplug_cpu_dead()) vs.
2210          * drain_stock() races is that we always operate on the local CPU's
2211          * stock here with IRQs disabled.
2212          */
2213         local_lock_irqsave(&memcg_stock.stock_lock, flags);
2214
2215         stock = this_cpu_ptr(&memcg_stock);
2216         old = drain_obj_stock(stock);
2217         drain_stock(stock);
2218         clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2219
2220         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2221         if (old)
2222                 obj_cgroup_put(old);
2223 }
2224
2225 /*
2226  * Cache charges (nr_pages) in the local per-cpu area.
2227  * They will be consumed later by consume_stock().
2228  */
2229 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2230 {
2231         struct memcg_stock_pcp *stock;
2232
2233         stock = this_cpu_ptr(&memcg_stock);
2234         if (stock->cached != memcg) { /* reset if necessary */
2235                 drain_stock(stock);
2236                 css_get(&memcg->css);
2237                 stock->cached = memcg;
2238         }
2239         stock->nr_pages += nr_pages;
2240
2241         if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2242                 drain_stock(stock);
2243 }
2244
2245 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2246 {
2247         unsigned long flags;
2248
2249         local_lock_irqsave(&memcg_stock.stock_lock, flags);
2250         __refill_stock(memcg, nr_pages);
2251         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2252 }
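
/*
 * To illustrate the batching above (sizes are illustrative; the real
 * batch is MEMCG_CHARGE_BATCH pages): if a task charges 1 page while
 * the charge path charges a full batch of, say, 32 pages against the
 * page counters, the 31 surplus pages are parked in this CPU's stock
 * via refill_stock().  The next 31 single-page charges from the same
 * memcg on this CPU are then served by consume_stock() without touching
 * the shared page counters at all, until the stock runs dry or is
 * flushed by drain_all_stock() or CPU hotplug.
 */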
2253
2254 /*
2255  * Drain all per-CPU charge caches for the given root_memcg, i.e. the
2256  * whole subtree of the hierarchy under it.
2257  */
2258 static void drain_all_stock(struct mem_cgroup *root_memcg)
2259 {
2260         int cpu, curcpu;
2261
2262         /* If someone's already draining, avoid adding more workers. */
2263         if (!mutex_trylock(&percpu_charge_mutex))
2264                 return;
2265         /*
2266          * Notify other CPUs that a system-wide "drain" is running.
2267          * We do not care about races with CPU hotplug because CPU down
2268          * as well as workers from this path always operate on the local
2269          * per-cpu data. CPU up doesn't touch memcg_stock at all.
2270          */
2271         migrate_disable();
2272         curcpu = smp_processor_id();
2273         for_each_online_cpu(cpu) {
2274                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2275                 struct mem_cgroup *memcg;
2276                 bool flush = false;
2277
2278                 rcu_read_lock();
2279                 memcg = stock->cached;
2280                 if (memcg && stock->nr_pages &&
2281                     mem_cgroup_is_descendant(memcg, root_memcg))
2282                         flush = true;
2283                 else if (obj_stock_flush_required(stock, root_memcg))
2284                         flush = true;
2285                 rcu_read_unlock();
2286
2287                 if (flush &&
2288                     !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2289                         if (cpu == curcpu)
2290                                 drain_local_stock(&stock->work);
2291                         else
2292                                 schedule_work_on(cpu, &stock->work);
2293                 }
2294         }
2295         migrate_enable();
2296         mutex_unlock(&percpu_charge_mutex);
2297 }
2298
2299 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2300 {
2301         struct memcg_stock_pcp *stock;
2302
2303         stock = &per_cpu(memcg_stock, cpu);
2304         drain_stock(stock);
2305
2306         return 0;
2307 }
2308
2309 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2310                                   unsigned int nr_pages,
2311                                   gfp_t gfp_mask)
2312 {
2313         unsigned long nr_reclaimed = 0;
2314
2315         do {
2316                 unsigned long pflags;
2317
2318                 if (page_counter_read(&memcg->memory) <=
2319                     READ_ONCE(memcg->memory.high))
2320                         continue;
2321
2322                 memcg_memory_event(memcg, MEMCG_HIGH);
2323
2324                 psi_memstall_enter(&pflags);
2325                 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2326                                                              gfp_mask, true);
2327                 psi_memstall_leave(&pflags);
2328         } while ((memcg = parent_mem_cgroup(memcg)) &&
2329                  !mem_cgroup_is_root(memcg));
2330
2331         return nr_reclaimed;
2332 }
2333
2334 static void high_work_func(struct work_struct *work)
2335 {
2336         struct mem_cgroup *memcg;
2337
2338         memcg = container_of(work, struct mem_cgroup, high_work);
2339         reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2340 }
2341
2342 /*
2343  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2344  * enough to still cause a significant slowdown in most cases, while still
2345  * allowing diagnostics and tracing to proceed without becoming stuck.
2346  */
2347 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2348
2349 /*
2350  * When calculating the delay, we use these on either side of the exponentiation to
2351  * maintain precision and scale to a reasonable number of jiffies (see the table
2352  * below).
2353  *
2354  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2355  *   overage ratio to a delay.
2356  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2357  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2358  *   to produce a reasonable delay curve.
2359  *
2360  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2361  * reasonable delay curve compared to precision-adjusted overage, not
2362  * penalising heavily at first, but still making sure that growth beyond the
2363  * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2364  * example, with a high of 100 megabytes:
2365  *
2366  *  +-------+------------------------+
2367  *  | usage | time to allocate in ms |
2368  *  +-------+------------------------+
2369  *  | 100M  |                      0 |
2370  *  | 101M  |                      6 |
2371  *  | 102M  |                     25 |
2372  *  | 103M  |                     57 |
2373  *  | 104M  |                    102 |
2374  *  | 105M  |                    159 |
2375  *  | 106M  |                    230 |
2376  *  | 107M  |                    313 |
2377  *  | 108M  |                    409 |
2378  *  | 109M  |                    518 |
2379  *  | 110M  |                    639 |
2380  *  | 111M  |                    774 |
2381  *  | 112M  |                    921 |
2382  *  | 113M  |                   1081 |
2383  *  | 114M  |                   1254 |
2384  *  | 115M  |                   1439 |
2385  *  | 116M  |                   1638 |
2386  *  | 117M  |                   1849 |
2387  *  | 118M  |                   2000 |
2388  *  | 119M  |                   2000 |
2389  *  | 120M  |                   2000 |
2390  *  +-------+------------------------+
2391  */
2392 #define MEMCG_DELAY_PRECISION_SHIFT 20
2393 #define MEMCG_DELAY_SCALING_SHIFT 14
2394
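/*
 * Worked example for the 105M row of the table above (assuming 4K pages
 * and HZ == 1000; both are illustrative assumptions):
 *
 *	high    = 100M = 25600 pages, usage = 105M = 26880 pages
 *	overage = ((26880 - 25600) << 20) / 25600       = 52428
 *	penalty = (52428 * 52428 * HZ) >> 20 >> 14      = 159 jiffies
 *
 * which is ~159ms per fully-sized (MEMCG_CHARGE_BATCH) allocation batch,
 * matching the table entry.  The result is then scaled by
 * nr_pages / MEMCG_CHARGE_BATCH in calculate_high_delay() and clamped
 * to MEMCG_MAX_HIGH_DELAY_JIFFIES.
 */
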
2395 static u64 calculate_overage(unsigned long usage, unsigned long high)
2396 {
2397         u64 overage;
2398
2399         if (usage <= high)
2400                 return 0;
2401
2402         /*
2403          * Prevent division by 0 in overage calculation by acting as if
2404          * it was a threshold of 1 page
2405          */
2406         high = max(high, 1UL);
2407
2408         overage = usage - high;
2409         overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2410         return div64_u64(overage, high);
2411 }
2412
2413 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2414 {
2415         u64 overage, max_overage = 0;
2416
2417         do {
2418                 overage = calculate_overage(page_counter_read(&memcg->memory),
2419                                             READ_ONCE(memcg->memory.high));
2420                 max_overage = max(overage, max_overage);
2421         } while ((memcg = parent_mem_cgroup(memcg)) &&
2422                  !mem_cgroup_is_root(memcg));
2423
2424         return max_overage;
2425 }
2426
2427 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2428 {
2429         u64 overage, max_overage = 0;
2430
2431         do {
2432                 overage = calculate_overage(page_counter_read(&memcg->swap),
2433                                             READ_ONCE(memcg->swap.high));
2434                 if (overage)
2435                         memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2436                 max_overage = max(overage, max_overage);
2437         } while ((memcg = parent_mem_cgroup(memcg)) &&
2438                  !mem_cgroup_is_root(memcg));
2439
2440         return max_overage;
2441 }
2442
2443 /*
2444  * Get the number of jiffies that we should penalise a mischievous cgroup which
2445  * is exceeding its memory.high by checking both it and its ancestors.
2446  */
2447 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2448                                           unsigned int nr_pages,
2449                                           u64 max_overage)
2450 {
2451         unsigned long penalty_jiffies;
2452
2453         if (!max_overage)
2454                 return 0;
2455
2456         /*
2457          * We use overage compared to memory.high to calculate the number of
2458          * jiffies to sleep (penalty_jiffies). Ideally this value should be
2459          * fairly lenient on small overages, and increasingly harsh when the
2460          * memcg in question makes it clear that it has no intention of stopping
2461          * its crazy behaviour, so we exponentially increase the delay based on
2462          * overage amount.
2463          */
2464         penalty_jiffies = max_overage * max_overage * HZ;
2465         penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2466         penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2467
2468         /*
2469          * Factor in the task's own contribution to the overage, such that four
2470          * N-sized allocations are throttled approximately the same as one
2471          * 4N-sized allocation.
2472          *
2473          * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2474          * larger the current charge batch is than that.
2475          */
2476         return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2477 }
2478
2479 /*
2480  * Scheduled by try_charge() to be executed from the userland return path
2481  * and reclaims memory over the high limit.
2482  */
2483 void mem_cgroup_handle_over_high(void)
2484 {
2485         unsigned long penalty_jiffies;
2486         unsigned long pflags;
2487         unsigned long nr_reclaimed;
2488         unsigned int nr_pages = current->memcg_nr_pages_over_high;
2489         int nr_retries = MAX_RECLAIM_RETRIES;
2490         struct mem_cgroup *memcg;
2491         bool in_retry = false;
2492
2493         if (likely(!nr_pages))
2494                 return;
2495
2496         memcg = get_mem_cgroup_from_mm(current->mm);
2497         current->memcg_nr_pages_over_high = 0;
2498
2499 retry_reclaim:
2500         /*
2501          * The allocating task should reclaim at least the batch size, but for
2502          * subsequent retries we only want to do what's necessary to prevent oom
2503          * or breaching resource isolation.
2504          *
2505          * This is distinct from memory.max or page allocator behaviour because
2506          * memory.high is currently batched, whereas memory.max and the page
2507          * allocator run every time an allocation is made.
2508          */
2509         nr_reclaimed = reclaim_high(memcg,
2510                                     in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2511                                     GFP_KERNEL);
2512
2513         /*
2514          * memory.high is breached and reclaim is unable to keep up. Throttle
2515          * allocators proactively to slow down excessive growth.
2516          */
2517         penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2518                                                mem_find_max_overage(memcg));
2519
2520         penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2521                                                 swap_find_max_overage(memcg));
2522
2523         /*
2524          * Clamp the max delay per usermode return so as to still keep the
2525          * application moving forwards and also permit diagnostics, albeit
2526          * extremely slowly.
2527          */
2528         penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2529
2530         /*
2531          * Don't sleep if the amount of jiffies this memcg owes us is so low
2532          * that it's not even worth doing, in an attempt to be nice to those who
2533          * go only a small amount over their memory.high value and maybe haven't
2534          * been aggressively reclaimed enough yet.
2535          */
2536         if (penalty_jiffies <= HZ / 100)
2537                 goto out;
2538
2539         /*
2540          * If reclaim is making forward progress but we're still over
2541          * memory.high, we want to encourage that rather than doing allocator
2542          * throttling.
2543          */
2544         if (nr_reclaimed || nr_retries--) {
2545                 in_retry = true;
2546                 goto retry_reclaim;
2547         }
2548
2549         /*
2550          * If we exit early, we're guaranteed to die (since
2551          * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2552          * need to account for any ill-begotten jiffies to pay them off later.
2553          */
2554         psi_memstall_enter(&pflags);
2555         schedule_timeout_killable(penalty_jiffies);
2556         psi_memstall_leave(&pflags);
2557
2558 out:
2559         css_put(&memcg->css);
2560 }
2561
2562 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2563                         unsigned int nr_pages)
2564 {
2565         unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2566         int nr_retries = MAX_RECLAIM_RETRIES;
2567         struct mem_cgroup *mem_over_limit;
2568         struct page_counter *counter;
2569         unsigned long nr_reclaimed;
2570         bool passed_oom = false;
2571         bool may_swap = true;
2572         bool drained = false;
2573         unsigned long pflags;
2574
2575 retry:
2576         if (consume_stock(memcg, nr_pages))
2577                 return 0;
2578
2579         if (!do_memsw_account() ||
2580             page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2581                 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2582                         goto done_restock;
2583                 if (do_memsw_account())
2584                         page_counter_uncharge(&memcg->memsw, batch);
2585                 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2586         } else {
2587                 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2588                 may_swap = false;
2589         }
2590
2591         if (batch > nr_pages) {
2592                 batch = nr_pages;
2593                 goto retry;
2594         }
2595
2596         /*
2597          * Prevent unbounded recursion when reclaim operations need to
2598          * allocate memory. This might exceed the limits temporarily,
2599          * but we prefer facilitating memory reclaim and getting back
2600          * under the limit over triggering OOM kills in these cases.
2601          */
2602         if (unlikely(current->flags & PF_MEMALLOC))
2603                 goto force;
2604
2605         if (unlikely(task_in_memcg_oom(current)))
2606                 goto nomem;
2607
2608         if (!gfpflags_allow_blocking(gfp_mask))
2609                 goto nomem;
2610
2611         memcg_memory_event(mem_over_limit, MEMCG_MAX);
2612
2613         psi_memstall_enter(&pflags);
2614         nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2615                                                     gfp_mask, may_swap);
2616         psi_memstall_leave(&pflags);
2617
2618         if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2619                 goto retry;
2620
2621         if (!drained) {
2622                 drain_all_stock(mem_over_limit);
2623                 drained = true;
2624                 goto retry;
2625         }
2626
2627         if (gfp_mask & __GFP_NORETRY)
2628                 goto nomem;
2629         /*
2630          * Even though the limit is exceeded at this point, reclaim
2631          * may have been able to free some pages.  Retry the charge
2632          * before killing the task.
2633          *
2634          * Only for regular pages, though: huge pages are rather
2635          * unlikely to succeed so close to the limit, and we fall back
2636          * to regular pages anyway in case of failure.
2637          */
2638         if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2639                 goto retry;
2640         /*
2641          * During a task move, charge accounts can be doubly counted. So it's
2642          * better to wait until the end of the task move if one is in progress.
2643          */
2644         if (mem_cgroup_wait_acct_move(mem_over_limit))
2645                 goto retry;
2646
2647         if (nr_retries--)
2648                 goto retry;
2649
2650         if (gfp_mask & __GFP_RETRY_MAYFAIL)
2651                 goto nomem;
2652
2653         /* Avoid endless loop for tasks bypassed by the oom killer */
2654         if (passed_oom && task_is_dying())
2655                 goto nomem;
2656
2657         /*
2658          * Keep retrying as long as the memcg OOM killer is able to make
2659          * forward progress, or bypass the charge if the OOM killer
2660          * couldn't make any progress.
2661          */
2662         if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2663                            get_order(nr_pages * PAGE_SIZE))) {
2664                 passed_oom = true;
2665                 nr_retries = MAX_RECLAIM_RETRIES;
2666                 goto retry;
2667         }
2668 nomem:
2669         /*
2670          * Memcg doesn't have a dedicated reserve for atomic
2671          * allocations. But like the global atomic pool, we need to
2672          * put the burden of reclaim on regular allocation requests
2673          * and let these go through as privileged allocations.
2674          */
2675         if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2676                 return -ENOMEM;
2677 force:
2678         /*
2679          * The allocation either can't fail or will lead to more memory
2680          * being freed very soon.  Allow memory usage to go over the limit
2681          * temporarily by force charging it.
2682          */
2683         page_counter_charge(&memcg->memory, nr_pages);
2684         if (do_memsw_account())
2685                 page_counter_charge(&memcg->memsw, nr_pages);
2686
2687         return 0;
2688
2689 done_restock:
2690         if (batch > nr_pages)
2691                 refill_stock(memcg, batch - nr_pages);
2692
2693         /*
2694          * If the hierarchy is above the normal consumption range, schedule
2695          * reclaim on returning to userland.  We can perform reclaim here
2696          * if __GFP_RECLAIM but let's always punt for simplicity and so that
2697          * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2698          * not recorded as it most likely matches current's and won't
2699          * change in the meantime.  As high limit is checked again before
2700          * reclaim, the cost of mismatch is negligible.
2701          */
2702         do {
2703                 bool mem_high, swap_high;
2704
2705                 mem_high = page_counter_read(&memcg->memory) >
2706                         READ_ONCE(memcg->memory.high);
2707                 swap_high = page_counter_read(&memcg->swap) >
2708                         READ_ONCE(memcg->swap.high);
2709
2710                 /* Don't bother a random interrupted task */
2711                 if (!in_task()) {
2712                         if (mem_high) {
2713                                 schedule_work(&memcg->high_work);
2714                                 break;
2715                         }
2716                         continue;
2717                 }
2718
2719                 if (mem_high || swap_high) {
2720                         /*
2721                          * The allocating tasks in this cgroup will need to do
2722                          * reclaim or be throttled to prevent further growth
2723                          * of the memory or swap footprints.
2724                          *
2725                          * Target some best-effort fairness between the tasks,
2726                          * and distribute reclaim work and delay penalties
2727                          * based on how much each task is actually allocating.
2728                          */
2729                         current->memcg_nr_pages_over_high += batch;
2730                         set_notify_resume(current);
2731                         break;
2732                 }
2733         } while ((memcg = parent_mem_cgroup(memcg)));
2734
2735         if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2736             !(current->flags & PF_MEMALLOC) &&
2737             gfpflags_allow_blocking(gfp_mask)) {
2738                 mem_cgroup_handle_over_high();
2739         }
2740         return 0;
2741 }
2742
2743 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2744                              unsigned int nr_pages)
2745 {
2746         if (mem_cgroup_is_root(memcg))
2747                 return 0;
2748
2749         return try_charge_memcg(memcg, gfp_mask, nr_pages);
2750 }
2751
2752 static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2753 {
2754         if (mem_cgroup_is_root(memcg))
2755                 return;
2756
2757         page_counter_uncharge(&memcg->memory, nr_pages);
2758         if (do_memsw_account())
2759                 page_counter_uncharge(&memcg->memsw, nr_pages);
2760 }
2761
2762 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2763 {
2764         VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2765         /*
2766          * Any of the following ensures page's memcg stability:
2767          *
2768          * - the page lock
2769          * - LRU isolation
2770          * - lock_page_memcg()
2771          * - exclusive reference
2772          */
2773         folio->memcg_data = (unsigned long)memcg;
2774 }
2775
2776 #ifdef CONFIG_MEMCG_KMEM
2777 /*
2778  * The allocated objcg pointers array is not accounted directly.
2779  * Moreover, it should not come from a DMA buffer and is not readily
2780  * reclaimable. So those GFP bits should be masked off.
2781  */
2782 #define OBJCGS_CLEAR_MASK       (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
2783
2784 /*
2785  * mod_objcg_mlstate() may be called with irqs enabled, so the
2786  * irq-safe mod_memcg_lruvec_state() should be used.
2787  */
2788 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2789                                      struct pglist_data *pgdat,
2790                                      enum node_stat_item idx, int nr)
2791 {
2792         struct mem_cgroup *memcg;
2793         struct lruvec *lruvec;
2794
2795         rcu_read_lock();
2796         memcg = obj_cgroup_memcg(objcg);
2797         lruvec = mem_cgroup_lruvec(memcg, pgdat);
2798         mod_memcg_lruvec_state(lruvec, idx, nr);
2799         rcu_read_unlock();
2800 }
2801
2802 int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
2803                                  gfp_t gfp, bool new_slab)
2804 {
2805         unsigned int objects = objs_per_slab(s, slab);
2806         unsigned long memcg_data;
2807         void *vec;
2808
2809         gfp &= ~OBJCGS_CLEAR_MASK;
2810         vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2811                            slab_nid(slab));
2812         if (!vec)
2813                 return -ENOMEM;
2814
2815         memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
2816         if (new_slab) {
2817                 /*
2818                  * If the slab is brand new and nobody can yet access its
2819                  * memcg_data, no synchronization is required and memcg_data can
2820                  * be simply assigned.
2821                  */
2822                 slab->memcg_data = memcg_data;
2823         } else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
2824                 /*
2825                  * If the slab is already in use, somebody can allocate and
2826                  * assign obj_cgroups in parallel. In this case the existing
2827                  * objcg vector should be reused.
2828                  */
2829                 kfree(vec);
2830                 return 0;
2831         }
2832
2833         kmemleak_not_leak(vec);
2834         return 0;
2835 }
2836
2837 /*
2838  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2839  *
2840  * A passed kernel object can be a slab object or a generic kernel page, so
2841  * different mechanisms for getting the memory cgroup pointer should be used.
2842  * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
2843  * cannot know for sure how the kernel object is implemented.
2844  * mem_cgroup_from_obj() can be safely used in such cases.
2845  *
2846  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2847  * cgroup_mutex, etc.
2848  */
2849 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2850 {
2851         struct folio *folio;
2852
2853         if (mem_cgroup_disabled())
2854                 return NULL;
2855
2856         folio = virt_to_folio(p);
2857
2858         /*
2859          * Slab objects are accounted individually, not per-page.
2860          * Memcg membership data for each individual object is saved in
2861          * slab->memcg_data.
2862          */
2863         if (folio_test_slab(folio)) {
2864                 struct obj_cgroup **objcgs;
2865                 struct slab *slab;
2866                 unsigned int off;
2867
2868                 slab = folio_slab(folio);
2869                 objcgs = slab_objcgs(slab);
2870                 if (!objcgs)
2871                         return NULL;
2872
2873                 off = obj_to_index(slab->slab_cache, slab, p);
2874                 if (objcgs[off])
2875                         return obj_cgroup_memcg(objcgs[off]);
2876
2877                 return NULL;
2878         }
2879
2880         /*
2881          * page_memcg_check() is used here, because in theory we can encounter
2882          * a folio where the slab flag has been cleared already, but
2883          * slab->memcg_data has not been freed yet.
2884          * page_memcg_check() will guarantee that a proper memory
2885          * cgroup pointer or NULL will be returned.
2886          */
2887         return page_memcg_check(folio_page(folio, 0));
2888 }
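
/*
 * A minimal usage sketch for the lifetime rule documented above
 * (illustrative only): the returned memcg is only guaranteed to stay
 * alive for as long as the caller holds rcu_read_lock() (or another
 * stabilizer such as cgroup_mutex or a css reference).
 *
 *	struct mem_cgroup *memcg;
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_obj(ptr);
 *	if (memcg)
 *		...use memcg while still under RCU...
 *	rcu_read_unlock();
 */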
2889
2890 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
2891 {
2892         struct obj_cgroup *objcg = NULL;
2893         struct mem_cgroup *memcg;
2894
2895         if (memcg_kmem_bypass())
2896                 return NULL;
2897
2898         rcu_read_lock();
2899         if (unlikely(active_memcg()))
2900                 memcg = active_memcg();
2901         else
2902                 memcg = mem_cgroup_from_task(current);
2903
2904         for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
2905                 objcg = rcu_dereference(memcg->objcg);
2906                 if (objcg && obj_cgroup_tryget(objcg))
2907                         break;
2908                 objcg = NULL;
2909         }
2910         rcu_read_unlock();
2911
2912         return objcg;
2913 }
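
/*
 * A rough sketch of how an allocation path is expected to use the objcg
 * returned above (modelled on the slab pre-alloc hook; simplified, with
 * flags and size standing in for the caller's own values):
 *
 *	objcg = get_obj_cgroup_from_current();
 *	if (objcg) {
 *		if (obj_cgroup_charge(objcg, flags, size)) {
 *			obj_cgroup_put(objcg);
 *			return NULL;	// charge failed
 *		}
 *		...record objcg for the object and drop the reference
 *		   again when the object is freed...
 *	}
 */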
2914
2915 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2916 {
2917         mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
2918         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
2919                 if (nr_pages > 0)
2920                         page_counter_charge(&memcg->kmem, nr_pages);
2921                 else
2922                         page_counter_uncharge(&memcg->kmem, -nr_pages);
2923         }
2924 }
2925
2926
2927 /*
2928  * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
2929  * @objcg: object cgroup to uncharge
2930  * @nr_pages: number of pages to uncharge
2931  */
2932 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2933                                       unsigned int nr_pages)
2934 {
2935         struct mem_cgroup *memcg;
2936
2937         memcg = get_mem_cgroup_from_objcg(objcg);
2938
2939         memcg_account_kmem(memcg, -nr_pages);
2940         refill_stock(memcg, nr_pages);
2941
2942         css_put(&memcg->css);
2943 }
2944
2945 /*
2946  * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
2947  * @objcg: object cgroup to charge
2948  * @gfp: reclaim mode
2949  * @nr_pages: number of pages to charge
2950  *
2951  * Returns 0 on success, an error code on failure.
2952  */
2953 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
2954                                    unsigned int nr_pages)
2955 {
2956         struct mem_cgroup *memcg;
2957         int ret;
2958
2959         memcg = get_mem_cgroup_from_objcg(objcg);
2960
2961         ret = try_charge_memcg(memcg, gfp, nr_pages);
2962         if (ret)
2963                 goto out;
2964
2965         memcg_account_kmem(memcg, nr_pages);
2966 out:
2967         css_put(&memcg->css);
2968
2969         return ret;
2970 }
2971
2972 /**
2973  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2974  * @page: page to charge
2975  * @gfp: reclaim mode
2976  * @order: allocation order
2977  *
2978  * Returns 0 on success, an error code on failure.
2979  */
2980 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2981 {
2982         struct obj_cgroup *objcg;
2983         int ret = 0;
2984
2985         objcg = get_obj_cgroup_from_current();
2986         if (objcg) {
2987                 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
2988                 if (!ret) {
2989                         page->memcg_data = (unsigned long)objcg |
2990                                 MEMCG_DATA_KMEM;
2991                         return 0;
2992                 }
2993                 obj_cgroup_put(objcg);
2994         }
2995         return ret;
2996 }
2997
2998 /**
2999  * __memcg_kmem_uncharge_page: uncharge a kmem page
3000  * @page: page to uncharge
3001  * @order: allocation order
3002  */
3003 void __memcg_kmem_uncharge_page(struct page *page, int order)
3004 {
3005         struct folio *folio = page_folio(page);
3006         struct obj_cgroup *objcg;
3007         unsigned int nr_pages = 1 << order;
3008
3009         if (!folio_memcg_kmem(folio))
3010                 return;
3011
3012         objcg = __folio_objcg(folio);
3013         obj_cgroup_uncharge_pages(objcg, nr_pages);
3014         folio->memcg_data = 0;
3015         obj_cgroup_put(objcg);
3016 }
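
/*
 * These two hooks are driven by the page allocator whenever __GFP_ACCOUNT
 * is set.  For illustration (a sketch, not a specific in-tree call site):
 *
 *	// Charged to the allocating task's memcg via
 *	// __memcg_kmem_charge_page(); the objcg is stashed in
 *	// page->memcg_data with MEMCG_DATA_KMEM set.
 *	page = alloc_pages(GFP_KERNEL_ACCOUNT, order);
 *
 *	// Uncharged again via __memcg_kmem_uncharge_page().
 *	__free_pages(page, order);
 */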
3017
3018 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3019                      enum node_stat_item idx, int nr)
3020 {
3021         struct memcg_stock_pcp *stock;
3022         struct obj_cgroup *old = NULL;
3023         unsigned long flags;
3024         int *bytes;
3025
3026         local_lock_irqsave(&memcg_stock.stock_lock, flags);
3027         stock = this_cpu_ptr(&memcg_stock);
3028
3029         /*
3030          * Save the vmstat data in the stock and skip the vmstat array update
3031          * until more than a page's worth of data has accumulated or until
3032          * pgdat or idx changes.
3033          */
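        /*
         * Illustrative example (hypothetical sizes, assuming 4kB pages):
         * uncharging two ~3kB slab objects on the same pgdat results in two
         * calls with idx == NR_SLAB_UNRECLAIMABLE_B and nr == -3000.  The
         * first call is only accumulated in stock->nr_slab_unreclaimable_b;
         * the second brings the cached value to -6000, whose magnitude
         * exceeds PAGE_SIZE, so both are flushed with a single
         * mod_objcg_mlstate() call.
         */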
3034         if (stock->cached_objcg != objcg) {
3035                 old = drain_obj_stock(stock);
3036                 obj_cgroup_get(objcg);
3037                 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3038                                 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3039                 stock->cached_objcg = objcg;
3040                 stock->cached_pgdat = pgdat;
3041         } else if (stock->cached_pgdat != pgdat) {
3042                 /* Flush the existing cached vmstat data */
3043                 struct pglist_data *oldpg = stock->cached_pgdat;
3044
3045                 if (stock->nr_slab_reclaimable_b) {
3046                         mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3047                                           stock->nr_slab_reclaimable_b);
3048                         stock->nr_slab_reclaimable_b = 0;
3049                 }
3050                 if (stock->nr_slab_unreclaimable_b) {
3051                         mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3052                                           stock->nr_slab_unreclaimable_b);
3053                         stock->nr_slab_unreclaimable_b = 0;
3054                 }
3055                 stock->cached_pgdat = pgdat;
3056         }
3057
3058         bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3059                                                : &stock->nr_slab_unreclaimable_b;
3060         /*
3061          * Even for a large object >= PAGE_SIZE, the vmstat data is still
3062          * cached locally at least once before being pushed out.
3063          */
3064         if (!*bytes) {
3065                 *bytes = nr;
3066                 nr = 0;
3067         } else {
3068                 *bytes += nr;
3069                 if (abs(*bytes) > PAGE_SIZE) {
3070                         nr = *bytes;
3071                         *bytes = 0;
3072                 } else {
3073                         nr = 0;
3074                 }
3075         }
3076         if (nr)
3077                 mod_objcg_mlstate(objcg, pgdat, idx, nr);
3078
3079         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3080         if (old)
3081                 obj_cgroup_put(old);
3082 }
3083
3084 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3085 {
3086         struct memcg_stock_pcp *stock;
3087         unsigned long flags;
3088         bool ret = false;
3089
3090         local_lock_irqsave(&memcg_stock.stock_lock, flags);
3091
3092         stock = this_cpu_ptr(&memcg_stock);
3093         if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3094                 stock->nr_bytes -= nr_bytes;
3095                 ret = true;
3096         }
3097
3098         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3099
3100         return ret;
3101 }
3102
3103 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3104 {
3105         struct obj_cgroup *old = stock->cached_objcg;
3106
3107         if (!old)
3108                 return NULL;
3109
3110         if (stock->nr_bytes) {
3111                 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3112                 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3113
3114                 if (nr_pages) {
3115                         struct mem_cgroup *memcg;
3116
3117                         memcg = get_mem_cgroup_from_objcg(old);
3118
3119                         memcg_account_kmem(memcg, -nr_pages);
3120                         __refill_stock(memcg, nr_pages);
3121
3122                         css_put(&memcg->css);
3123                 }
3124
3125                 /*
3126                  * The leftover is flushed to the centralized per-memcg value.
3127                  * On the next attempt to refill obj stock it will be moved
3128                  * to a per-cpu stock (probably on another CPU), see
3129                  * refill_obj_stock().
3130                  *
3131                  * How often it's flushed is a trade-off between the memory
3132                  * limit enforcement accuracy and potential CPU contention,
3133                  * so it might be changed in the future.
3134                  */
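                /*
                 * A worked example of the split above (assuming 4kB pages):
                 * stock->nr_bytes == 5000 yields nr_pages == 1 and
                 * nr_bytes == 904, so one whole page is returned to the
                 * per-cpu page stock while the 904 leftover bytes are added
                 * to old->nr_charged_bytes below.
                 */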
3135                 atomic_add(nr_bytes, &old->nr_charged_bytes);
3136                 stock->nr_bytes = 0;
3137         }
3138
3139         /*
3140          * Flush the vmstat data in current stock
3141          */
3142         if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3143                 if (stock->nr_slab_reclaimable_b) {
3144                         mod_objcg_mlstate(old, stock->cached_pgdat,
3145                                           NR_SLAB_RECLAIMABLE_B,
3146                                           stock->nr_slab_reclaimable_b);
3147                         stock->nr_slab_reclaimable_b = 0;
3148                 }
3149                 if (stock->nr_slab_unreclaimable_b) {
3150                         mod_objcg_mlstate(old, stock->cached_pgdat,
3151                                           NR_SLAB_UNRECLAIMABLE_B,
3152                                           stock->nr_slab_unreclaimable_b);
3153                         stock->nr_slab_unreclaimable_b = 0;
3154                 }
3155                 stock->cached_pgdat = NULL;
3156         }
3157
3158         stock->cached_objcg = NULL;
3159         /*
3160          * The `old' object needs to be released by the caller via
3161          * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3162          */
3163         return old;
3164 }
3165
3166 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3167                                      struct mem_cgroup *root_memcg)
3168 {
3169         struct mem_cgroup *memcg;
3170
3171         if (stock->cached_objcg) {
3172                 memcg = obj_cgroup_memcg(stock->cached_objcg);
3173                 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3174                         return true;
3175         }
3176
3177         return false;
3178 }
3179
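/*
 * Illustrative behaviour (hypothetical size, assuming 4kB pages): an
 * obj_cgroup_uncharge() of a 6000-byte object refills the local stock by
 * 6000 bytes; since that exceeds PAGE_SIZE and uncharge is allowed, one
 * whole page is uncharged from the memcg and 1904 bytes stay cached.
 */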
3180 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3181                              bool allow_uncharge)
3182 {
3183         struct memcg_stock_pcp *stock;
3184         struct obj_cgroup *old = NULL;
3185         unsigned long flags;
3186         unsigned int nr_pages = 0;
3187
3188         local_lock_irqsave(&memcg_stock.stock_lock, flags);
3189
3190         stock = this_cpu_ptr(&memcg_stock);
3191         if (stock->cached_objcg != objcg) { /* reset if necessary */
3192                 old = drain_obj_stock(stock);
3193                 obj_cgroup_get(objcg);
3194                 stock->cached_objcg = objcg;
3195                 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3196                                 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3197                 allow_uncharge = true;  /* Allow uncharge when objcg changes */
3198         }
3199         stock->nr_bytes += nr_bytes;
3200
3201         if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3202                 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3203                 stock->nr_bytes &= (PAGE_SIZE - 1);
3204         }
3205
3206         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3207         if (old)
3208                 obj_cgroup_put(old);
3209
3210         if (nr_pages)
3211                 obj_cgroup_uncharge_pages(objcg, nr_pages);
3212 }
3213
3214 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3215 {
3216         unsigned int nr_pages, nr_bytes;
3217         int ret;
3218
3219         if (consume_obj_stock(objcg, size))
3220                 return 0;
3221
3222         /*
3223          * In theory, objcg->nr_charged_bytes can have enough
3224          * pre-charged bytes to satisfy the allocation. However,
3225          * flushing objcg->nr_charged_bytes requires two atomic
3226          * operations, and objcg->nr_charged_bytes can't be big.
3227          * The shared objcg->nr_charged_bytes can also become a
3228          * performance bottleneck if all tasks of the same memcg are
3229          * trying to update it. So it's better to ignore it and try to
3230          * grab some new pages. The stock's nr_bytes will be flushed to
3231          * objcg->nr_charged_bytes later on when objcg changes.
3232          *
3233          * The stock's nr_bytes may contain enough pre-charged bytes
3234          * to allow charging one less page, but we can't rely
3235          * on the pre-charged bytes not being changed outside of
3236          * consume_obj_stock() or refill_obj_stock(). So ignore those
3237          * pre-charged bytes as well when charging pages. To avoid a
3238          * page uncharge right after a page charge, we set the
3239          * allow_uncharge flag to false when calling refill_obj_stock()
3240          * to temporarily allow the pre-charged bytes to exceed the page
3241          * size limit. The maximum reachable value of the pre-charged
3242          * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3243          * race.
3244          */
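        /*
         * Worked example (hypothetical size, assuming 4kB pages): for
         * size == 9000 the split below gives nr_pages == 2 and
         * nr_bytes == 808, so 3 pages are charged and the unused
         * PAGE_SIZE - 808 == 3288 bytes are returned to the local stock
         * via refill_obj_stock().
         */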
3245         nr_pages = size >> PAGE_SHIFT;
3246         nr_bytes = size & (PAGE_SIZE - 1);
3247
3248         if (nr_bytes)
3249                 nr_pages += 1;
3250
3251         ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3252         if (!ret && nr_bytes)
3253                 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3254
3255         return ret;
3256 }
3257
3258 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3259 {
3260         refill_obj_stock(objcg, size, true);
3261 }
3262
3263 #endif /* CONFIG_MEMCG_KMEM */
3264
3265 /*
3266  * Because page_memcg(head) is not set on tails, set it now.
3267  */
3268 void split_page_memcg(struct page *head, unsigned int nr)
3269 {
3270         struct folio *folio = page_folio(head);
3271         struct mem_cgroup *memcg = folio_memcg(folio);
3272         int i;
3273
3274         if (mem_cgroup_disabled() || !memcg)
3275                 return;
3276
3277         for (i = 1; i < nr; i++)
3278                 folio_page(folio, i)->memcg_data = folio->memcg_data;
3279
3280         if (folio_memcg_kmem(folio))
3281                 obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
3282         else
3283                 css_get_many(&memcg->css, nr - 1);
3284 }
3285
3286 #ifdef CONFIG_MEMCG_SWAP
3287 /**
3288  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3289  * @entry: swap entry to be moved
3290  * @from:  mem_cgroup which the entry is moved from
3291  * @to:  mem_cgroup which the entry is moved to
3292  *
3293  * It succeeds only when the swap_cgroup's record for this entry is the same
3294  * as the mem_cgroup's id of @from.
3295  *
3296  * Returns 0 on success, -EINVAL on failure.
3297  *
3298  * The caller must have charged to @to, IOW, called page_counter_charge() on
3299  * both res and memsw, and called css_get().
3300  */
3301 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3302                                 struct mem_cgroup *from, struct mem_cgroup *to)
3303 {
3304         unsigned short old_id, new_id;
3305
3306         old_id = mem_cgroup_id(from);
3307         new_id = mem_cgroup_id(to);
3308
3309         if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3310                 mod_memcg_state(from, MEMCG_SWAP, -1);
3311                 mod_memcg_state(to, MEMCG_SWAP, 1);
3312                 return 0;
3313         }
3314         return -EINVAL;
3315 }
3316 #else
3317 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3318                                 struct mem_cgroup *from, struct mem_cgroup *to)
3319 {
3320         return -EINVAL;
3321 }
3322 #endif
3323
3324 static DEFINE_MUTEX(memcg_max_mutex);
3325
3326 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3327                                  unsigned long max, bool memsw)
3328 {
3329         bool enlarge = false;
3330         bool drained = false;
3331         int ret;
3332         bool limits_invariant;
3333         struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3334
3335         do {
3336                 if (signal_pending(current)) {
3337                         ret = -EINTR;
3338                         break;
3339                 }
3340
3341                 mutex_lock(&memcg_max_mutex);
3342                 /*
3343                  * Make sure that the new limit (memsw or memory limit) doesn't
3344                  * break the basic invariant memory.max <= memsw.max.
3345                  */
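                /*
                 * For example (hypothetical limits): with memory.max at
                 * 100M, an attempt to lower memsw.max to 50M violates the
                 * invariant and fails with -EINVAL.
                 */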
3346                 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3347                                            max <= memcg->memsw.max;
3348                 if (!limits_invariant) {
3349                         mutex_unlock(&memcg_max_mutex);
3350                         ret = -EINVAL;
3351                         break;
3352                 }
3353                 if (max > counter->max)
3354                         enlarge = true;
3355                 ret = page_counter_set_max(counter, max);
3356                 mutex_unlock(&memcg_max_mutex);
3357
3358                 if (!ret)
3359                         break;
3360
3361                 if (!drained) {
3362                         drain_all_stock(memcg);
3363                         drained = true;
3364                         continue;
3365                 }
3366
3367                 if (!try_to_free_mem_cgroup_pages(memcg, 1,
3368                                         GFP_KERNEL, !memsw)) {
3369                         ret = -EBUSY;
3370                         break;
3371                 }
3372         } while (true);
3373
3374         if (!ret && enlarge)
3375                 memcg_oom_recover(memcg);
3376
3377         return ret;
3378 }
3379
3380 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3381                                             gfp_t gfp_mask,
3382                                             unsigned long *total_scanned)
3383 {
3384         unsigned long nr_reclaimed = 0;
3385         struct mem_cgroup_per_node *mz, *next_mz = NULL;
3386         unsigned long reclaimed;
3387         int loop = 0;
3388         struct mem_cgroup_tree_per_node *mctz;
3389         unsigned long excess;
3390         unsigned long nr_scanned;
3391
3392         if (order > 0)
3393                 return 0;
3394
3395         mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3396
3397         /*
3398          * Do not even bother to check the largest node if the root
3399          * is empty. Do it lockless to prevent lock bouncing. Races
3400          * are acceptable as soft limit is best effort anyway.
3401          */
3402         if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3403                 return 0;
3404
3405         /*
3406          * This loop can run for a while, especially if mem_cgroups
3407          * continuously keep exceeding their soft limit and putting the
3408          * system under pressure.
3409          */
3410         do {
3411                 if (next_mz)
3412                         mz = next_mz;
3413                 else
3414                         mz = mem_cgroup_largest_soft_limit_node(mctz);
3415                 if (!mz)
3416                         break;
3417
3418                 nr_scanned = 0;
3419                 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3420                                                     gfp_mask, &nr_scanned);
3421                 nr_reclaimed += reclaimed;
3422                 *total_scanned += nr_scanned;
3423                 spin_lock_irq(&mctz->lock);
3424                 __mem_cgroup_remove_exceeded(mz, mctz);
3425
3426                 /*
3427                  * If we failed to reclaim anything from this memory cgroup,
3428                  * it is time to move on to the next cgroup.
3429                  */
3430                 next_mz = NULL;
3431                 if (!reclaimed)
3432                         next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3433
3434                 excess = soft_limit_excess(mz->memcg);
3435                 /*
3436                  * One school of thought says that we should not add
3437                  * back the node to the tree if reclaim returns 0.
3438                  * But our reclaim could return 0 simply because, due to
3439                  * the reclaim priority, we are exposing a smaller subset
3440                  * of memory to reclaim from. Consider this a longer-term
3441                  * TODO.
3442                  */
3443                 /* If excess == 0, no tree ops */
3444                 __mem_cgroup_insert_exceeded(mz, mctz, excess);
3445                 spin_unlock_irq(&mctz->lock);
3446                 css_put(&mz->memcg->css);
3447                 loop++;
3448                 /*
3449                  * Could not reclaim anything and there are no more
3450                  * mem cgroups to try or we seem to be looping without
3451                  * reclaiming anything.
3452                  */
3453                 if (!nr_reclaimed &&
3454                         (next_mz == NULL ||
3455                         loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3456                         break;
3457         } while (!nr_reclaimed);
3458         if (next_mz)
3459                 css_put(&next_mz->memcg->css);
3460         return nr_reclaimed;
3461 }
3462
3463 /*
3464  * Reclaims as many pages from the given memcg as possible.
3465  *
3466  * Caller is responsible for holding css reference for memcg.
3467  */
3468 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3469 {
3470         int nr_retries = MAX_RECLAIM_RETRIES;
3471
3472         /* we call try-to-free pages to make this cgroup empty */
3473         lru_add_drain_all();
3474
3475         drain_all_stock(memcg);
3476
3477         /* try to free all pages in this cgroup */
3478         while (nr_retries && page_counter_read(&memcg->memory)) {
3479                 if (signal_pending(current))
3480                         return -EINTR;
3481
3482                 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true))
3483                         nr_retries--;
3484         }
3485
3486         return 0;
3487 }
3488
3489 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3490                                             char *buf, size_t nbytes,
3491                                             loff_t off)
3492 {
3493         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3494
3495         if (mem_cgroup_is_root(memcg))
3496                 return -EINVAL;
3497         return mem_cgroup_force_empty(memcg) ?: nbytes;
3498 }
3499
3500 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3501                                      struct cftype *cft)
3502 {
3503         return 1;
3504 }
3505
3506 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3507                                       struct cftype *cft, u64 val)
3508 {
3509         if (val == 1)
3510                 return 0;
3511
3512         pr_warn_once("Non-hierarchical mode is deprecated. "
3513                      "Please report your usecase to linux-mm@kvack.org if you "
3514                      "depend on this functionality.\n");
3515
3516         return -EINVAL;
3517 }
3518
3519 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3520 {
3521         unsigned long val;
3522
3523         if (mem_cgroup_is_root(memcg)) {
3524                 mem_cgroup_flush_stats();
3525                 val = memcg_page_state(memcg, NR_FILE_PAGES) +
3526                         memcg_page_state(memcg, NR_ANON_MAPPED);
3527                 if (swap)
3528                         val += memcg_page_state(memcg, MEMCG_SWAP);
3529         } else {
3530                 if (!swap)
3531                         val = page_counter_read(&memcg->memory);
3532                 else
3533                         val = page_counter_read(&memcg->memsw);
3534         }
3535         return val;
3536 }
3537
3538 enum {
3539         RES_USAGE,
3540         RES_LIMIT,
3541         RES_MAX_USAGE,
3542         RES_FAILCNT,
3543         RES_SOFT_LIMIT,
3544 };
3545
3546 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3547                                struct cftype *cft)
3548 {
3549         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3550         struct page_counter *counter;
3551
3552         switch (MEMFILE_TYPE(cft->private)) {
3553         case _MEM:
3554                 counter = &memcg->memory;
3555                 break;
3556         case _MEMSWAP:
3557                 counter = &memcg->memsw;
3558                 break;
3559         case _KMEM:
3560                 counter = &memcg->kmem;
3561                 break;
3562         case _TCP:
3563                 counter = &memcg->tcpmem;
3564                 break;
3565         default:
3566                 BUG();
3567         }
3568
3569         switch (MEMFILE_ATTR(cft->private)) {
3570         case RES_USAGE:
3571                 if (counter == &memcg->memory)
3572                         return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3573                 if (counter == &memcg->memsw)
3574                         return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3575                 return (u64)page_counter_read(counter) * PAGE_SIZE;
3576         case RES_LIMIT:
3577                 return (u64)counter->max * PAGE_SIZE;
3578         case RES_MAX_USAGE:
3579                 return (u64)counter->watermark * PAGE_SIZE;
3580         case RES_FAILCNT:
3581                 return counter->failcnt;
3582         case RES_SOFT_LIMIT:
3583                 return (u64)memcg->soft_limit * PAGE_SIZE;
3584         default:
3585                 BUG();
3586         }
3587 }
3588
3589 #ifdef CONFIG_MEMCG_KMEM
3590 static int memcg_online_kmem(struct mem_cgroup *memcg)
3591 {
3592         struct obj_cgroup *objcg;
3593
3594         if (cgroup_memory_nokmem)
3595                 return 0;
3596
3597         if (unlikely(mem_cgroup_is_root(memcg)))
3598                 return 0;
3599
3600         objcg = obj_cgroup_alloc();
3601         if (!objcg)
3602                 return -ENOMEM;
3603
3604         objcg->memcg = memcg;
3605         rcu_assign_pointer(memcg->objcg, objcg);
3606
3607         static_branch_enable(&memcg_kmem_enabled_key);
3608
3609         memcg->kmemcg_id = memcg->id.id;
3610
3611         return 0;
3612 }
3613
3614 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3615 {
3616         struct mem_cgroup *parent;
3617
3618         if (cgroup_memory_nokmem)
3619                 return;
3620
3621         if (unlikely(mem_cgroup_is_root(memcg)))
3622                 return;
3623
3624         parent = parent_mem_cgroup(memcg);
3625         if (!parent)
3626                 parent = root_mem_cgroup;
3627
3628         memcg_reparent_objcgs(memcg, parent);
3629
3630         /*
3631          * After we have finished memcg_reparent_objcgs(), all list_lrus
3632          * corresponding to this cgroup are guaranteed to remain empty.
3633          * The ordering is imposed by list_lru_node->lock taken by
3634          * memcg_reparent_list_lrus().
3635          */
3636         memcg_reparent_list_lrus(memcg, parent);
3637 }
3638 #else
3639 static int memcg_online_kmem(struct mem_cgroup *memcg)
3640 {
3641         return 0;
3642 }
3643 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3644 {
3645 }
3646 #endif /* CONFIG_MEMCG_KMEM */
3647
3648 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3649 {
3650         int ret;
3651
3652         mutex_lock(&memcg_max_mutex);
3653
3654         ret = page_counter_set_max(&memcg->tcpmem, max);
3655         if (ret)
3656                 goto out;
3657
3658         if (!memcg->tcpmem_active) {
3659                 /*
3660                  * The active flag needs to be written after the static_key
3661                  * update. This is what guarantees that the socket activation
3662                  * function is the last one to run. See mem_cgroup_sk_alloc()
3663                  * for details, and note that we don't mark any socket as
3664                  * belonging to this memcg until that flag is up.
3665                  *
3666                  * We need to do this, because static_keys will span multiple
3667                  * sites, but we can't control their order. If we mark a socket
3668                  * as accounted, but the accounting functions are not patched in
3669                  * yet, we'll lose accounting.
3670                  *
3671                  * We never race with the readers in mem_cgroup_sk_alloc(),
3672                  * because when this value changes, the code to process it is not
3673                  * patched in yet.
3674                  */
3675                 static_branch_inc(&memcg_sockets_enabled_key);
3676                 memcg->tcpmem_active = true;
3677         }
3678 out:
3679         mutex_unlock(&memcg_max_mutex);
3680         return ret;
3681 }
3682
3683 /*
3684  * This function handles writes to the limit files:
3685  * RES_LIMIT and RES_SOFT_LIMIT.
3686  */
3687 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3688                                 char *buf, size_t nbytes, loff_t off)
3689 {
3690         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3691         unsigned long nr_pages;
3692         int ret;
3693
3694         buf = strstrip(buf);
3695         ret = page_counter_memparse(buf, "-1", &nr_pages);
3696         if (ret)
3697                 return ret;
3698
3699         switch (MEMFILE_ATTR(of_cft(of)->private)) {
3700         case RES_LIMIT:
3701                 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3702                         ret = -EINVAL;
3703                         break;
3704                 }
3705                 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3706                 case _MEM:
3707                         ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3708                         break;
3709                 case _MEMSWAP:
3710                         ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3711                         break;
3712                 case _KMEM:
3713                         /* kmem.limit_in_bytes is deprecated. */
3714                         ret = -EOPNOTSUPP;
3715                         break;
3716                 case _TCP:
3717                         ret = memcg_update_tcp_max(memcg, nr_pages);
3718                         break;
3719                 }
3720                 break;
3721         case RES_SOFT_LIMIT:
3722                 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
3723                         ret = -EOPNOTSUPP;
3724                 } else {
3725                         memcg->soft_limit = nr_pages;
3726                         ret = 0;
3727                 }
3728                 break;
3729         }
3730         return ret ?: nbytes;
3731 }
3732
3733 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3734                                 size_t nbytes, loff_t off)
3735 {
3736         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3737         struct page_counter *counter;
3738
3739         switch (MEMFILE_TYPE(of_cft(of)->private)) {
3740         case _MEM:
3741                 counter = &memcg->memory;
3742                 break;
3743         case _MEMSWAP:
3744                 counter = &memcg->memsw;
3745                 break;
3746         case _KMEM:
3747                 counter = &memcg->kmem;
3748                 break;
3749         case _TCP:
3750                 counter = &memcg->tcpmem;
3751                 break;
3752         default:
3753                 BUG();
3754         }
3755
3756         switch (MEMFILE_ATTR(of_cft(of)->private)) {
3757         case RES_MAX_USAGE:
3758                 page_counter_reset_watermark(counter);
3759                 break;
3760         case RES_FAILCNT:
3761                 counter->failcnt = 0;
3762                 break;
3763         default:
3764                 BUG();
3765         }
3766
3767         return nbytes;
3768 }
3769
3770 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3771                                         struct cftype *cft)
3772 {
3773         return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3774 }
3775
3776 #ifdef CONFIG_MMU
3777 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3778                                         struct cftype *cft, u64 val)
3779 {
3780         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3781
3782         if (val & ~MOVE_MASK)
3783                 return -EINVAL;
3784
3785         /*
3786          * No locking is needed here, because ->can_attach() will
3787          * check this value once at the beginning of the process, and then carry
3788          * on with stale data. This means that changes to this value will only
3789          * affect task migrations starting after the change.
3790          */
3791         memcg->move_charge_at_immigrate = val;
3792         return 0;
3793 }
3794 #else
3795 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3796                                         struct cftype *cft, u64 val)
3797 {
3798         return -ENOSYS;
3799 }
3800 #endif
3801
3802 #ifdef CONFIG_NUMA
3803
3804 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3805 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3806 #define LRU_ALL      ((1 << NR_LRU_LISTS) - 1)
3807
3808 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3809                                 int nid, unsigned int lru_mask, bool tree)
3810 {
3811         struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3812         unsigned long nr = 0;
3813         enum lru_list lru;
3814
3815         VM_BUG_ON((unsigned)nid >= nr_node_ids);
3816
3817         for_each_lru(lru) {
3818                 if (!(BIT(lru) & lru_mask))
3819                         continue;
3820                 if (tree)
3821                         nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3822                 else
3823                         nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3824         }
3825         return nr;
3826 }
3827
3828 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3829                                              unsigned int lru_mask,
3830                                              bool tree)
3831 {
3832         unsigned long nr = 0;
3833         enum lru_list lru;
3834
3835         for_each_lru(lru) {
3836                 if (!(BIT(lru) & lru_mask))
3837                         continue;
3838                 if (tree)
3839                         nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3840                 else
3841                         nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3842         }
3843         return nr;
3844 }
3845
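/*
 * Output format: one line per entry of stats[] below, followed by the
 * hierarchical variants, e.g. (hypothetical counts on a two-node system):
 *
 *	total=2048 N0=1536 N1=512
 *	file=1024 N0=768 N1=256
 *	...
 *	hierarchical_total=4096 N0=3072 N1=1024
 *	...
 */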
3846 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3847 {
3848         struct numa_stat {
3849                 const char *name;
3850                 unsigned int lru_mask;
3851         };
3852
3853         static const struct numa_stat stats[] = {
3854                 { "total", LRU_ALL },
3855                 { "file", LRU_ALL_FILE },
3856                 { "anon", LRU_ALL_ANON },
3857                 { "unevictable", BIT(LRU_UNEVICTABLE) },
3858         };
3859         const struct numa_stat *stat;
3860         int nid;
3861         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3862
3863         mem_cgroup_flush_stats();
3864
3865         for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3866                 seq_printf(m, "%s=%lu", stat->name,
3867                            mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3868                                                    false));
3869                 for_each_node_state(nid, N_MEMORY)
3870                         seq_printf(m, " N%d=%lu", nid,
3871                                    mem_cgroup_node_nr_lru_pages(memcg, nid,
3872                                                         stat->lru_mask, false));
3873                 seq_putc(m, '\n');
3874         }
3875
3876         for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3877
3878                 seq_printf(m, "hierarchical_%s=%lu", stat->name,
3879                            mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3880                                                    true));
3881                 for_each_node_state(nid, N_MEMORY)
3882                         seq_printf(m, " N%d=%lu", nid,
3883                                    mem_cgroup_node_nr_lru_pages(memcg, nid,
3884                                                         stat->lru_mask, true));
3885                 seq_putc(m, '\n');
3886         }
3887
3888         return 0;
3889 }
3890 #endif /* CONFIG_NUMA */
3891
3892 static const unsigned int memcg1_stats[] = {
3893         NR_FILE_PAGES,
3894         NR_ANON_MAPPED,
3895 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3896         NR_ANON_THPS,
3897 #endif
3898         NR_SHMEM,
3899         NR_FILE_MAPPED,
3900         NR_FILE_DIRTY,
3901         NR_WRITEBACK,
3902         MEMCG_SWAP,
3903 };
3904
3905 static const char *const memcg1_stat_names[] = {
3906         "cache",
3907         "rss",
3908 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3909         "rss_huge",
3910 #endif
3911         "shmem",
3912         "mapped_file",
3913         "dirty",
3914         "writeback",
3915         "swap",
3916 };
3917
3918 /* Universal VM events cgroup1 shows, original sort order */
3919 static const unsigned int memcg1_events[] = {
3920         PGPGIN,
3921         PGPGOUT,
3922         PGFAULT,
3923         PGMAJFAULT,
3924 };
3925
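/*
 * Output format, e.g. (hypothetical values):
 *
 *	cache 4096000
 *	rss 8192000
 *	...
 *	pgpgin 12345
 *	...
 *	hierarchical_memory_limit 1073741824
 *	total_cache 4096000
 *	...
 */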
3926 static int memcg_stat_show(struct seq_file *m, void *v)
3927 {
3928         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3929         unsigned long memory, memsw;
3930         struct mem_cgroup *mi;
3931         unsigned int i;
3932
3933         BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
3934
3935         mem_cgroup_flush_stats();
3936
3937         for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3938                 unsigned long nr;
3939
3940                 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3941                         continue;
3942                 nr = memcg_page_state_local(memcg, memcg1_stats[i]);
3943                 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
3944         }
3945
3946         for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3947                 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
3948                            memcg_events_local(memcg, memcg1_events[i]));
3949
3950         for (i = 0; i < NR_LRU_LISTS; i++)
3951                 seq_printf(m, "%s %lu\n", lru_list_name(i),
3952                            memcg_page_state_local(memcg, NR_LRU_BASE + i) *
3953                            PAGE_SIZE);
3954
3955         /* Hierarchical information */
3956         memory = memsw = PAGE_COUNTER_MAX;
3957         for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3958                 memory = min(memory, READ_ONCE(mi->memory.max));
3959                 memsw = min(memsw, READ_ONCE(mi->memsw.max));
3960         }
3961         seq_printf(m, "hierarchical_memory_limit %llu\n",
3962                    (u64)memory * PAGE_SIZE);
3963         if (do_memsw_account())
3964                 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3965                            (u64)memsw * PAGE_SIZE);
3966
3967         for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3968                 unsigned long nr;
3969
3970                 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3971                         continue;
3972                 nr = memcg_page_state(memcg, memcg1_stats[i]);
3973                 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
3974                                                 (u64)nr * PAGE_SIZE);
3975         }
3976
3977         for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3978                 seq_printf(m, "total_%s %llu\n",
3979                            vm_event_name(memcg1_events[i]),
3980                            (u64)memcg_events(memcg, memcg1_events[i]));
3981
3982         for (i = 0; i < NR_LRU_LISTS; i++)
3983                 seq_printf(m, "total_%s %llu\n", lru_list_name(i),
3984                            (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
3985                            PAGE_SIZE);
3986
3987 #ifdef CONFIG_DEBUG_VM
3988         {
3989                 pg_data_t *pgdat;
3990                 struct mem_cgroup_per_node *mz;
3991                 unsigned long anon_cost = 0;
3992                 unsigned long file_cost = 0;
3993
3994                 for_each_online_pgdat(pgdat) {
3995                         mz = memcg->nodeinfo[pgdat->node_id];
3996
3997                         anon_cost += mz->lruvec.anon_cost;
3998                         file_cost += mz->lruvec.file_cost;
3999                 }
4000                 seq_printf(m, "anon_cost %lu\n", anon_cost);
4001                 seq_printf(m, "file_cost %lu\n", file_cost);
4002         }
4003 #endif
4004
4005         return 0;
4006 }
4007
4008 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4009                                       struct cftype *cft)
4010 {
4011         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4012
4013         return mem_cgroup_swappiness(memcg);
4014 }
4015
4016 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4017                                        struct cftype *cft, u64 val)
4018 {
4019         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4020
4021         if (val > 200)
4022                 return -EINVAL;
4023
4024         if (!mem_cgroup_is_root(memcg))
4025                 memcg->swappiness = val;
4026         else
4027                 vm_swappiness = val;
4028
4029         return 0;
4030 }
4031
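/*
 * Illustrative walk-through (hypothetical thresholds): with registered
 * thresholds {4M, 8M, 16M, 32M} and current_threshold left at 8M by the
 * previous call, a new usage of 20M makes the backward loop stop right
 * away (8M <= 20M), the forward loop signal the 16M eventfd
 * (16M <= 20M < 32M), and current_threshold end up at 16M.
 */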
4032 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4033 {
4034         struct mem_cgroup_threshold_ary *t;
4035         unsigned long usage;
4036         int i;
4037
4038         rcu_read_lock();
4039         if (!swap)
4040                 t = rcu_dereference(memcg->thresholds.primary);
4041         else
4042                 t = rcu_dereference(memcg->memsw_thresholds.primary);
4043
4044         if (!t)
4045                 goto unlock;
4046
4047         usage = mem_cgroup_usage(memcg, swap);
4048
4049         /*
4050          * current_threshold points to the threshold just below or equal to usage.
4051          * If that's not true, a threshold was crossed after the last
4052          * call of __mem_cgroup_threshold().
4053          */
4054         i = t->current_threshold;
4055
4056         /*
4057          * Iterate backward over array of thresholds starting from
4058          * current_threshold and check if a threshold is crossed.
4059          * If none of the thresholds below usage is crossed, we read
4060          * only one element of the array here.
4061          */
4062         for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4063                 eventfd_signal(t->entries[i].eventfd, 1);
4064
4065         /* i = current_threshold + 1 */
4066         i++;
4067
4068         /*
4069          * Iterate forward over array of thresholds starting from
4070          * current_threshold+1 and check if a threshold is crossed.
4071          * If none of the thresholds above usage is crossed, we read
4072          * only one element of the array here.
4073          */
4074         for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4075                 eventfd_signal(t->entries[i].eventfd, 1);
4076
4077         /* Update current_threshold */
4078         t->current_threshold = i - 1;
4079 unlock:
4080         rcu_read_unlock();
4081 }
4082
4083 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4084 {
4085         while (memcg) {
4086                 __mem_cgroup_threshold(memcg, false);
4087                 if (do_memsw_account())
4088                         __mem_cgroup_threshold(memcg, true);
4089
4090                 memcg = parent_mem_cgroup(memcg);
4091         }
4092 }
4093
4094 static int compare_thresholds(const void *a, const void *b)
4095 {
4096         const struct mem_cgroup_threshold *_a = a;
4097         const struct mem_cgroup_threshold *_b = b;
4098
4099         if (_a->threshold > _b->threshold)
4100                 return 1;
4101
4102         if (_a->threshold < _b->threshold)
4103                 return -1;
4104
4105         return 0;
4106 }
4107
4108 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4109 {
4110         struct mem_cgroup_eventfd_list *ev;
4111
4112         spin_lock(&memcg_oom_lock);
4113
4114         list_for_each_entry(ev, &memcg->oom_notify, list)
4115                 eventfd_signal(ev->eventfd, 1);
4116
4117         spin_unlock(&memcg_oom_lock);
4118         return 0;
4119 }
4120
4121 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4122 {
4123         struct mem_cgroup *iter;
4124
4125         for_each_mem_cgroup_tree(iter, memcg)
4126                 mem_cgroup_oom_notify_cb(iter);
4127 }
4128
4129 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4130         struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4131 {
4132         struct mem_cgroup_thresholds *thresholds;
4133         struct mem_cgroup_threshold_ary *new;
4134         unsigned long threshold;
4135         unsigned long usage;
4136         int i, size, ret;
4137
4138         ret = page_counter_memparse(args, "-1", &threshold);
4139         if (ret)
4140                 return ret;
4141
4142         mutex_lock(&memcg->thresholds_lock);
4143
4144         if (type == _MEM) {
4145                 thresholds = &memcg->thresholds;
4146                 usage = mem_cgroup_usage(memcg, false);
4147         } else if (type == _MEMSWAP) {
4148                 thresholds = &memcg->memsw_thresholds;
4149                 usage = mem_cgroup_usage(memcg, true);
4150         } else
4151                 BUG();
4152
4153         /* Check if a threshold was crossed before adding a new one */
4154         if (thresholds->primary)
4155                 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4156
4157         size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4158
4159         /* Allocate memory for new array of thresholds */
4160         new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4161         if (!new) {
4162                 ret = -ENOMEM;
4163                 goto unlock;
4164         }
4165         new->size = size;
4166
4167         /* Copy thresholds (if any) to new array */
4168         if (thresholds->primary)
4169                 memcpy(new->entries, thresholds->primary->entries,
4170                        flex_array_size(new, entries, size - 1));
4171
4172         /* Add new threshold */
4173         new->entries[size - 1].eventfd = eventfd;
4174         new->entries[size - 1].threshold = threshold;
4175
4176         /* Sort thresholds. Registering of new threshold isn't time-critical */
4177         sort(new->entries, size, sizeof(*new->entries),
4178                         compare_thresholds, NULL);
4179
4180         /* Find current threshold */
4181         new->current_threshold = -1;
4182         for (i = 0; i < size; i++) {
4183                 if (new->entries[i].threshold <= usage) {
4184                         /*
4185                          * new->current_threshold will not be used until
4186                          * rcu_assign_pointer(), so it's safe to increment
4187                          * it here.
4188                          */
4189                         ++new->current_threshold;
4190                 } else
4191                         break;
4192         }
4193
4194         /* Free old spare buffer and save old primary buffer as spare */
4195         kfree(thresholds->spare);
4196         thresholds->spare = thresholds->primary;
4197
4198         rcu_assign_pointer(thresholds->primary, new);
4199
4200         /* To be sure that nobody uses thresholds */
4201         synchronize_rcu();
4202
4203 unlock:
4204         mutex_unlock(&memcg->thresholds_lock);
4205
4206         return ret;
4207 }
4208
4209 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4210         struct eventfd_ctx *eventfd, const char *args)
4211 {
4212         return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4213 }
4214
4215 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4216         struct eventfd_ctx *eventfd, const char *args)
4217 {
4218         return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4219 }
4220
4221 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4222         struct eventfd_ctx *eventfd, enum res_type type)
4223 {
4224         struct mem_cgroup_thresholds *thresholds;
4225         struct mem_cgroup_threshold_ary *new;
4226         unsigned long usage;
4227         int i, j, size, entries;
4228
4229         mutex_lock(&memcg->thresholds_lock);
4230
4231         if (type == _MEM) {
4232                 thresholds = &memcg->thresholds;
4233                 usage = mem_cgroup_usage(memcg, false);
4234         } else if (type == _MEMSWAP) {
4235                 thresholds = &memcg->memsw_thresholds;
4236                 usage = mem_cgroup_usage(memcg, true);
4237         } else
4238                 BUG();
4239
4240         if (!thresholds->primary)
4241                 goto unlock;
4242
4243         /* Check if a threshold was crossed before removing */
4244         __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4245
4246         /* Calculate the new number of thresholds */
4247         size = entries = 0;
4248         for (i = 0; i < thresholds->primary->size; i++) {
4249                 if (thresholds->primary->entries[i].eventfd != eventfd)
4250                         size++;
4251                 else
4252                         entries++;
4253         }
4254
4255         new = thresholds->spare;
4256
4257         /* If no items related to eventfd have been cleared, nothing to do */
4258         if (!entries)
4259                 goto unlock;
4260
4261         /* Set thresholds array to NULL if we don't have thresholds */
4262         if (!size) {
4263                 kfree(new);
4264                 new = NULL;
4265                 goto swap_buffers;
4266         }
4267
4268         new->size = size;
4269
4270         /* Copy thresholds and find current threshold */
4271         new->current_threshold = -1;
4272         for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4273                 if (thresholds->primary->entries[i].eventfd == eventfd)
4274                         continue;
4275
4276                 new->entries[j] = thresholds->primary->entries[i];
4277                 if (new->entries[j].threshold <= usage) {
4278                         /*
4279                          * new->current_threshold will not be used
4280                          * until rcu_assign_pointer(), so it's safe to increment
4281                          * it here.
4282                          */
4283                         ++new->current_threshold;
4284                 }
4285                 j++;
4286         }
4287
4288 swap_buffers:
4289         /* Swap primary and spare array */
4290         thresholds->spare = thresholds->primary;
4291
4292         rcu_assign_pointer(thresholds->primary, new);
4293
4294         /* To be sure that nobody uses thresholds */
4295         synchronize_rcu();
4296
4297         /* If all events are unregistered, free the spare array */
4298         if (!new) {
4299                 kfree(thresholds->spare);
4300                 thresholds->spare = NULL;
4301         }
4302 unlock:
4303         mutex_unlock(&memcg->thresholds_lock);
4304 }
4305
4306 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4307         struct eventfd_ctx *eventfd)
4308 {
4309         return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4310 }
4311
4312 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4313         struct eventfd_ctx *eventfd)
4314 {
4315         return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4316 }
4317
4318 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4319         struct eventfd_ctx *eventfd, const char *args)
4320 {
4321         struct mem_cgroup_eventfd_list *event;
4322
4323         event = kmalloc(sizeof(*event), GFP_KERNEL);
4324         if (!event)
4325                 return -ENOMEM;
4326
4327         spin_lock(&memcg_oom_lock);
4328
4329         event->eventfd = eventfd;
4330         list_add(&event->list, &memcg->oom_notify);
4331
4332         /* already in OOM ? */
4333         if (memcg->under_oom)
4334                 eventfd_signal(eventfd, 1);
4335         spin_unlock(&memcg_oom_lock);
4336
4337         return 0;
4338 }
4339
4340 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4341         struct eventfd_ctx *eventfd)
4342 {
4343         struct mem_cgroup_eventfd_list *ev, *tmp;
4344
4345         spin_lock(&memcg_oom_lock);
4346
4347         list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4348                 if (ev->eventfd == eventfd) {
4349                         list_del(&ev->list);
4350                         kfree(ev);
4351                 }
4352         }
4353
4354         spin_unlock(&memcg_oom_lock);
4355 }
4356
4357 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4358 {
4359         struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4360
4361         seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4362         seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4363         seq_printf(sf, "oom_kill %lu\n",
4364                    atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4365         return 0;
4366 }
4367
4368 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4369         struct cftype *cft, u64 val)
4370 {
4371         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4372
4373         /* cannot set to root cgroup and only 0 and 1 are allowed */
4374         if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4375                 return -EINVAL;
4376
4377         memcg->oom_kill_disable = val;
4378         if (!val)
4379                 memcg_oom_recover(memcg);
4380
4381         return 0;
4382 }
4383
4384 #ifdef CONFIG_CGROUP_WRITEBACK
4385
4386 #include <trace/events/writeback.h>
4387
4388 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4389 {
4390         return wb_domain_init(&memcg->cgwb_domain, gfp);
4391 }
4392
4393 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4394 {
4395         wb_domain_exit(&memcg->cgwb_domain);
4396 }
4397
4398 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4399 {
4400         wb_domain_size_changed(&memcg->cgwb_domain);
4401 }
4402
4403 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4404 {
4405         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4406
4407         if (!memcg->css.parent)
4408                 return NULL;
4409
4410         return &memcg->cgwb_domain;
4411 }
4412
4413 /**
4414  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4415  * @wb: bdi_writeback in question
4416  * @pfilepages: out parameter for number of file pages
4417  * @pheadroom: out parameter for number of allocatable pages according to memcg
4418  * @pdirty: out parameter for number of dirty pages
4419  * @pwriteback: out parameter for number of pages under writeback
4420  *
4421  * Determine the numbers of file, headroom, dirty, and writeback pages in
4422  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4423  * is a bit more involved.
4424  *
4425  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4426  * headroom is calculated as the lowest headroom of itself and the
4427  * ancestors.  Note that this doesn't consider the actual amount of
4428  * available memory in the system.  The caller should further cap
4429  * *@pheadroom accordingly.
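 *
 * For example (hypothetical numbers): if @wb's memcg has max = 200MB,
 * high = max and 150MB in use (50MB of headroom), while its non-root
 * parent has high = 512MB and 480MB in use (32MB of headroom), then
 * *@pheadroom is reported as 32MB worth of pages.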
4430  */
4431 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4432                          unsigned long *pheadroom, unsigned long *pdirty,
4433                          unsigned long *pwriteback)
4434 {
4435         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4436         struct mem_cgroup *parent;
4437
4438         mem_cgroup_flush_stats();
4439
4440         *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4441         *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4442         *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4443                         memcg_page_state(memcg, NR_ACTIVE_FILE);
4444
4445         *pheadroom = PAGE_COUNTER_MAX;
4446         while ((parent = parent_mem_cgroup(memcg))) {
4447                 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4448                                             READ_ONCE(memcg->memory.high));
4449                 unsigned long used = page_counter_read(&memcg->memory);
4450
4451                 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4452                 memcg = parent;
4453         }
4454 }
4455
4456 /*
4457  * Foreign dirty flushing
4458  *
4459  * There's an inherent mismatch between memcg and writeback.  The former
4460  * tracks ownership per-page while the latter per-inode.  This was a
4461  * deliberate design decision because honoring per-page ownership in the
4462  * writeback path is complicated, may lead to higher CPU and IO overheads,
4463  * and is deemed unnecessary given that write-sharing an inode across
4464  * different cgroups isn't a common use-case.
4465  *
4466  * Combined with inode majority-writer ownership switching, this works well
4467  * enough in most cases but there are some pathological cases.  For
4468  * example, let's say there are two cgroups A and B which keep writing to
4469  * different but confined parts of the same inode.  B owns the inode and
4470  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4471  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4472  * triggering background writeback.  A will be slowed down without a way to
4473  * make writeback of the dirty pages happen.
4474  *
4475  * Conditions like the above can lead to a cgroup getting repeatedly and
4476  * severely throttled after making some progress after each
4477  * dirty_expire_interval while the underlying IO device is almost
4478  * completely idle.
4479  *
4480  * Solving this problem completely requires matching the ownership tracking
4481  * granularities between memcg and writeback in either direction.  However,
4482  * the more egregious behaviors can be avoided by simply remembering the
4483  * most recent foreign dirtying events and initiating remote flushes on
4484  * them when local writeback isn't enough to keep the memory clean enough.
4485  *
4486  * The following two functions implement such mechanism.  When a foreign
4487  * page - a page whose memcg and writeback ownerships don't match - is
4488  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4489  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4490  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4491  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4492  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4493  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4494  * limited to MEMCG_CGWB_FRN_CNT.
4495  *
4496  * The mechanism only remembers IDs and doesn't hold any object references.
4497  * As being wrong occasionally doesn't matter, updates and accesses to the
4498  * records are lockless and racy.
4499  */
4500 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4501                                              struct bdi_writeback *wb)
4502 {
4503         struct mem_cgroup *memcg = folio_memcg(folio);
4504         struct memcg_cgwb_frn *frn;
4505         u64 now = get_jiffies_64();
4506         u64 oldest_at = now;
4507         int oldest = -1;
4508         int i;
4509
4510         trace_track_foreign_dirty(folio, wb);
4511
4512         /*
4513          * Pick the slot to use.  If there is already a slot for @wb, keep
4514          * using it.  If not, replace the oldest one that isn't being
4515          * written out.
4516          */
4517         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4518                 frn = &memcg->cgwb_frn[i];
4519                 if (frn->bdi_id == wb->bdi->id &&
4520                     frn->memcg_id == wb->memcg_css->id)
4521                         break;
4522                 if (time_before64(frn->at, oldest_at) &&
4523                     atomic_read(&frn->done.cnt) == 1) {
4524                         oldest = i;
4525                         oldest_at = frn->at;
4526                 }
4527         }
4528
4529         if (i < MEMCG_CGWB_FRN_CNT) {
4530                 /*
4531                  * Re-using an existing one.  Update timestamp lazily to
4532                  * avoid making the cacheline hot.  We want them to be
4533                  * reasonably up-to-date and significantly shorter than
4534                  * dirty_expire_interval (given in centisecs) as that's what
4535                  * expires the record.  Use the shorter of 1s and dirty_expire_interval / 8.
4536                  */
4537                 unsigned long update_intv =
4538                         min_t(unsigned long, HZ,
4539                               msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4540
4541                 if (time_before64(frn->at, now - update_intv))
4542                         frn->at = now;
4543         } else if (oldest >= 0) {
4544                 /* replace the oldest slot that isn't under writeback */
4545                 frn = &memcg->cgwb_frn[oldest];
4546                 frn->bdi_id = wb->bdi->id;
4547                 frn->memcg_id = wb->memcg_css->id;
4548                 frn->at = now;
4549         }
4550 }
4551
4552 /* issue foreign writeback flushes for recorded foreign dirtying events */
4553 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4554 {
4555         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4556         unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4557         u64 now = jiffies_64;
4558         int i;
4559
4560         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4561                 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4562
4563                 /*
4564                  * If the record is older than dirty_expire_interval,
4565                  * writeback on it has already started.  No need to kick it
4566                  * off again.  Also, don't start a new one if there's
4567                  * already one in flight.
4568                  */
4569                 if (time_after64(frn->at, now - intv) &&
4570                     atomic_read(&frn->done.cnt) == 1) {
4571                         frn->at = 0;
4572                         trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4573                         cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4574                                                WB_REASON_FOREIGN_FLUSH,
4575                                                &frn->done);
4576                 }
4577         }
4578 }
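/*
 * Hedged illustration (not a copy of the real call sites, which live in
 * include/linux/memcontrol.h and mm/page-writeback.c): roughly how the two
 * entry points above are driven.  The wrapper below is only a sketch of the
 * fast-path foreign check; balance_dirty_pages() then kicks the recorded
 * flushes when it is about to throttle a dirtier.
 *
 *	static inline void mem_cgroup_track_foreign_dirty(struct folio *folio,
 *							  struct bdi_writeback *wb)
 *	{
 *		if (mem_cgroup_disabled())
 *			return;
 *		if (unlikely(&folio_memcg(folio)->css != wb->memcg_css))
 *			mem_cgroup_track_foreign_dirty_slowpath(folio, wb);
 *	}
 *
 *	// in balance_dirty_pages(), before sleeping on dirty throttling:
 *	mem_cgroup_flush_foreign(wb);
 */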
4579
4580 #else   /* CONFIG_CGROUP_WRITEBACK */
4581
4582 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4583 {
4584         return 0;
4585 }
4586
4587 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4588 {
4589 }
4590
4591 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4592 {
4593 }
4594
4595 #endif  /* CONFIG_CGROUP_WRITEBACK */
4596
4597 /*
4598  * DO NOT USE IN NEW FILES.
4599  *
4600  * "cgroup.event_control" implementation.
4601  *
4602  * This is way over-engineered.  It tries to support fully configurable
4603  * events for each user.  Such a level of flexibility is completely
4604  * unnecessary, especially in light of the planned unified hierarchy.
4605  *
4606  * Please deprecate this and replace with something simpler if at all
4607  * possible.
4608  */
4609
4610 /*
4611  * Unregister event and free resources.
4612  *
4613  * Gets called from workqueue.
4614  */
4615 static void memcg_event_remove(struct work_struct *work)
4616 {
4617         struct mem_cgroup_event *event =
4618                 container_of(work, struct mem_cgroup_event, remove);
4619         struct mem_cgroup *memcg = event->memcg;
4620
4621         remove_wait_queue(event->wqh, &event->wait);
4622
4623         event->unregister_event(memcg, event->eventfd);
4624
4625         /* Notify userspace the event is going away. */
4626         eventfd_signal(event->eventfd, 1);
4627
4628         eventfd_ctx_put(event->eventfd);
4629         kfree(event);
4630         css_put(&memcg->css);
4631 }
4632
4633 /*
4634  * Gets called on EPOLLHUP on eventfd when user closes it.
4635  *
4636  * Called with wqh->lock held and interrupts disabled.
4637  */
4638 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4639                             int sync, void *key)
4640 {
4641         struct mem_cgroup_event *event =
4642                 container_of(wait, struct mem_cgroup_event, wait);
4643         struct mem_cgroup *memcg = event->memcg;
4644         __poll_t flags = key_to_poll(key);
4645
4646         if (flags & EPOLLHUP) {
4647                 /*
4648                  * If the event has been detached at cgroup removal, we
4649                  * can simply return knowing the other side will cleanup
4650                  * for us.
4651                  *
4652                  * We can't race against event freeing since the other
4653                  * side will require wqh->lock via remove_wait_queue(),
4654                  * which we hold.
4655                  */
4656                 spin_lock(&memcg->event_list_lock);
4657                 if (!list_empty(&event->list)) {
4658                         list_del_init(&event->list);
4659                         /*
4660                          * We are in atomic context, but memcg_event_remove()
4661                          * may sleep, so we have to call it from a workqueue.
4662                          */
4663                         schedule_work(&event->remove);
4664                 }
4665                 spin_unlock(&memcg->event_list_lock);
4666         }
4667
4668         return 0;
4669 }
4670
4671 static void memcg_event_ptable_queue_proc(struct file *file,
4672                 wait_queue_head_t *wqh, poll_table *pt)
4673 {
4674         struct mem_cgroup_event *event =
4675                 container_of(pt, struct mem_cgroup_event, pt);
4676
4677         event->wqh = wqh;
4678         add_wait_queue(wqh, &event->wait);
4679 }
4680
4681 /*
4682  * DO NOT USE IN NEW FILES.
4683  *
4684  * Parse input and register new cgroup event handler.
4685  *
4686  * Input must be in format '<event_fd> <control_fd> <args>'.
4687  * Interpretation of args is defined by control file implementation.
4688  */
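/*
 * Hedged userspace sketch of the format parsed below (illustrative only;
 * the cgroup path and the 1G threshold are made-up examples, and error
 * handling is omitted for brevity).  An eventfd and the control file are
 * opened, "<event_fd> <control_fd> <args>" is written to
 * cgroup.event_control, and the eventfd is then read to wait for
 * notifications:
 *
 *	#include <sys/eventfd.h>
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int efd = eventfd(0, 0);
 *		int cfd = open("/sys/fs/cgroup/memory/foo/memory.usage_in_bytes",
 *			       O_RDONLY);
 *		int ecfd = open("/sys/fs/cgroup/memory/foo/cgroup.event_control",
 *				O_WRONLY);
 *		uint64_t count;
 *
 *		// "<event_fd> <control_fd> <threshold in bytes>"
 *		dprintf(ecfd, "%d %d %llu", efd, cfd, 1ULL << 30);
 *		read(efd, &count, sizeof(count));	// blocks until crossed
 *		printf("threshold crossed %llu time(s)\n",
 *		       (unsigned long long)count);
 *		return 0;
 *	}
 */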
4689 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4690                                          char *buf, size_t nbytes, loff_t off)
4691 {
4692         struct cgroup_subsys_state *css = of_css(of);
4693         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4694         struct mem_cgroup_event *event;
4695         struct cgroup_subsys_state *cfile_css;
4696         unsigned int efd, cfd;
4697         struct fd efile;
4698         struct fd cfile;
4699         const char *name;
4700         char *endp;
4701         int ret;
4702
4703         if (IS_ENABLED(CONFIG_PREEMPT_RT))
4704                 return -EOPNOTSUPP;
4705
4706         buf = strstrip(buf);
4707
4708         efd = simple_strtoul(buf, &endp, 10);
4709         if (*endp != ' ')
4710                 return -EINVAL;
4711         buf = endp + 1;
4712
4713         cfd = simple_strtoul(buf, &endp, 10);
4714         if ((*endp != ' ') && (*endp != '\0'))
4715                 return -EINVAL;
4716         buf = endp + 1;
4717
4718         event = kzalloc(sizeof(*event), GFP_KERNEL);
4719         if (!event)
4720                 return -ENOMEM;
4721
4722         event->memcg = memcg;
4723         INIT_LIST_HEAD(&event->list);
4724         init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4725         init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4726         INIT_WORK(&event->remove, memcg_event_remove);
4727
4728         efile = fdget(efd);
4729         if (!efile.file) {
4730                 ret = -EBADF;
4731                 goto out_kfree;
4732         }
4733
4734         event->eventfd = eventfd_ctx_fileget(efile.file);
4735         if (IS_ERR(event->eventfd)) {
4736                 ret = PTR_ERR(event->eventfd);
4737                 goto out_put_efile;
4738         }
4739
4740         cfile = fdget(cfd);
4741         if (!cfile.file) {
4742                 ret = -EBADF;
4743                 goto out_put_eventfd;
4744         }
4745
4746         /* the process needs read permission on the control file */
4747         /* AV: shouldn't we check that it's been opened for read instead? */
4748         ret = file_permission(cfile.file, MAY_READ);
4749         if (ret < 0)
4750                 goto out_put_cfile;
4751
4752         /*
4753          * Determine the event callbacks and set them in @event.  This used
4754          * to be done via struct cftype but cgroup core no longer knows
4755          * about these events.  The following is crude but the whole thing
4756          * is for compatibility anyway.
4757          *
4758          * DO NOT ADD NEW FILES.
4759          */
4760         name = cfile.file->f_path.dentry->d_name.name;
4761
4762         if (!strcmp(name, "memory.usage_in_bytes")) {
4763                 event->register_event = mem_cgroup_usage_register_event;
4764                 event->unregister_event = mem_cgroup_usage_unregister_event;
4765         } else if (!strcmp(name, "memory.oom_control")) {
4766                 event->register_event = mem_cgroup_oom_register_event;
4767                 event->unregister_event = mem_cgroup_oom_unregister_event;
4768         } else if (!strcmp(name, "memory.pressure_level")) {
4769                 event->register_event = vmpressure_register_event;
4770                 event->unregister_event = vmpressure_unregister_event;
4771         } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4772                 event->register_event = memsw_cgroup_usage_register_event;
4773                 event->unregister_event = memsw_cgroup_usage_unregister_event;
4774         } else {
4775                 ret = -EINVAL;
4776                 goto out_put_cfile;
4777         }
4778
4779         /*
4780          * Verify that @cfile belongs to @css.  Also, remaining events are
4781          * automatically removed on cgroup destruction but the removal is
4782          * asynchronous, so take an extra ref on @css.
4783          */
4784         cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4785                                                &memory_cgrp_subsys);
4786         ret = -EINVAL;
4787         if (IS_ERR(cfile_css))
4788                 goto out_put_cfile;
4789         if (cfile_css != css) {
4790                 css_put(cfile_css);
4791                 goto out_put_cfile;
4792         }
4793
4794         ret = event->register_event(memcg, event->eventfd, buf);
4795         if (ret)
4796                 goto out_put_css;
4797
4798         vfs_poll(efile.file, &event->pt);
4799
4800         spin_lock_irq(&memcg->event_list_lock);
4801         list_add(&event->list, &memcg->event_list);
4802         spin_unlock_irq(&memcg->event_list_lock);
4803
4804         fdput(cfile);
4805         fdput(efile);
4806
4807         return nbytes;
4808
4809 out_put_css:
4810         css_put(css);
4811 out_put_cfile:
4812         fdput(cfile);
4813 out_put_eventfd:
4814         eventfd_ctx_put(event->eventfd);
4815 out_put_efile:
4816         fdput(efile);
4817 out_kfree:
4818         kfree(event);
4819
4820         return ret;
4821 }
4822
4823 #if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
4824 static int mem_cgroup_slab_show(struct seq_file *m, void *p)
4825 {
4826         /*
4827          * Deprecated.
4828          * Please, take a look at tools/cgroup/slabinfo.py .
4829          * Please take a look at tools/cgroup/slabinfo.py.
4830         return 0;
4831 }
4832 #endif
4833
4834 static struct cftype mem_cgroup_legacy_files[] = {
4835         {
4836                 .name = "usage_in_bytes",
4837                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4838                 .read_u64 = mem_cgroup_read_u64,
4839         },
4840         {
4841                 .name = "max_usage_in_bytes",
4842                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4843                 .write = mem_cgroup_reset,
4844                 .read_u64 = mem_cgroup_read_u64,
4845         },
4846         {
4847                 .name = "limit_in_bytes",
4848                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4849                 .write = mem_cgroup_write,
4850                 .read_u64 = mem_cgroup_read_u64,
4851         },
4852         {
4853                 .name = "soft_limit_in_bytes",
4854                 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4855                 .write = mem_cgroup_write,
4856                 .read_u64 = mem_cgroup_read_u64,
4857         },
4858         {
4859                 .name = "failcnt",
4860                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4861                 .write = mem_cgroup_reset,
4862                 .read_u64 = mem_cgroup_read_u64,
4863         },
4864         {
4865                 .name = "stat",
4866                 .seq_show = memcg_stat_show,
4867         },
4868         {
4869                 .name = "force_empty",
4870                 .write = mem_cgroup_force_empty_write,
4871         },
4872         {
4873                 .name = "use_hierarchy",
4874                 .write_u64 = mem_cgroup_hierarchy_write,
4875                 .read_u64 = mem_cgroup_hierarchy_read,
4876         },
4877         {
4878                 .name = "cgroup.event_control",         /* XXX: for compat */
4879                 .write = memcg_write_event_control,
4880                 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4881         },
4882         {
4883                 .name = "swappiness",
4884                 .read_u64 = mem_cgroup_swappiness_read,
4885                 .write_u64 = mem_cgroup_swappiness_write,
4886         },
4887         {
4888                 .name = "move_charge_at_immigrate",
4889                 .read_u64 = mem_cgroup_move_charge_read,
4890                 .write_u64 = mem_cgroup_move_charge_write,
4891         },
4892         {
4893                 .name = "oom_control",
4894                 .seq_show = mem_cgroup_oom_control_read,
4895                 .write_u64 = mem_cgroup_oom_control_write,
4896                 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4897         },
4898         {
4899                 .name = "pressure_level",
4900         },
4901 #ifdef CONFIG_NUMA
4902         {
4903                 .name = "numa_stat",
4904                 .seq_show = memcg_numa_stat_show,
4905         },
4906 #endif
4907         {
4908                 .name = "kmem.limit_in_bytes",
4909                 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4910                 .write = mem_cgroup_write,
4911                 .read_u64 = mem_cgroup_read_u64,
4912         },
4913         {
4914                 .name = "kmem.usage_in_bytes",
4915                 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4916                 .read_u64 = mem_cgroup_read_u64,
4917         },
4918         {
4919                 .name = "kmem.failcnt",
4920                 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4921                 .write = mem_cgroup_reset,
4922                 .read_u64 = mem_cgroup_read_u64,
4923         },
4924         {
4925                 .name = "kmem.max_usage_in_bytes",
4926                 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4927                 .write = mem_cgroup_reset,
4928                 .read_u64 = mem_cgroup_read_u64,
4929         },
4930 #if defined(CONFIG_MEMCG_KMEM) && \
4931         (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
4932         {
4933                 .name = "kmem.slabinfo",
4934                 .seq_show = mem_cgroup_slab_show,
4935         },
4936 #endif
4937         {
4938                 .name = "kmem.tcp.limit_in_bytes",
4939                 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4940                 .write = mem_cgroup_write,
4941                 .read_u64 = mem_cgroup_read_u64,
4942         },
4943         {
4944                 .name = "kmem.tcp.usage_in_bytes",
4945                 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4946                 .read_u64 = mem_cgroup_read_u64,
4947         },
4948         {
4949                 .name = "kmem.tcp.failcnt",
4950                 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4951                 .write = mem_cgroup_reset,
4952                 .read_u64 = mem_cgroup_read_u64,
4953         },
4954         {
4955                 .name = "kmem.tcp.max_usage_in_bytes",
4956                 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4957                 .write = mem_cgroup_reset,
4958                 .read_u64 = mem_cgroup_read_u64,
4959         },
4960         { },    /* terminate */
4961 };
4962
4963 /*
4964  * Private memory cgroup IDR
4965  *
4966  * Swap-out records and page cache shadow entries need to store memcg
4967  * references in constrained space, so we maintain an ID space that is
4968  * limited to 16 bits (MEM_CGROUP_ID_MAX), limiting the total number of
4969  * memory-controlled cgroups to 64k.
4970  *
4971  * However, there usually are many references to the offline CSS after
4972  * the cgroup has been destroyed, such as page cache or reclaimable
4973  * slab objects, that don't need to hang on to the ID. We want to keep
4974  * those dead CSS from occupying IDs, or we might quickly exhaust the
4975  * relatively small ID space and prevent the creation of new cgroups
4976  * even when there are much fewer than 64k cgroups - possibly none.
4977  *
4978  * Maintain a private 16-bit ID space for memcg, and allow the ID to
4979  * be freed and recycled when it's no longer needed, which is usually
4980  * when the CSS is offlined.
4981  *
4982  * The only exception to that are records of swapped out tmpfs/shmem
4983  * pages that need to be attributed to live ancestors on swapin. But
4984  * those references are manageable from userspace.
4985  */
4986
4987 static DEFINE_IDR(mem_cgroup_idr);
4988
4989 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
4990 {
4991         if (memcg->id.id > 0) {
4992                 idr_remove(&mem_cgroup_idr, memcg->id.id);
4993                 memcg->id.id = 0;
4994         }
4995 }
4996
4997 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
4998                                                   unsigned int n)
4999 {
5000         refcount_add(n, &memcg->id.ref);
5001 }
5002
5003 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5004 {
5005         if (refcount_sub_and_test(n, &memcg->id.ref)) {
5006                 mem_cgroup_id_remove(memcg);
5007
5008                 /* Memcg ID pins CSS */
5009                 css_put(&memcg->css);
5010         }
5011 }
5012
5013 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5014 {
5015         mem_cgroup_id_put_many(memcg, 1);
5016 }
5017
5018 /**
5019  * mem_cgroup_from_id - look up a memcg from a memcg id
5020  * @id: the memcg id to look up
5021  *
5022  * Caller must hold rcu_read_lock().
5023  */
5024 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5025 {
5026         WARN_ON_ONCE(!rcu_read_lock_held());
5027         return idr_find(&mem_cgroup_idr, id);
5028 }
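/*
 * Hedged example of the expected calling pattern (illustrative, not a
 * quote of an existing call site): the ID alone does not pin the memcg,
 * so the lookup must happen under RCU and the caller takes its own CSS
 * reference before leaving the read-side section.
 *
 *	struct mem_cgroup *memcg;
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */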
5029
5030 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5031 {
5032         struct mem_cgroup_per_node *pn;
5033
5034         pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5035         if (!pn)
5036                 return 1;
5037
5038         pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5039                                                    GFP_KERNEL_ACCOUNT);
5040         if (!pn->lruvec_stats_percpu) {
5041                 kfree(pn);
5042                 return 1;
5043         }
5044
5045         lruvec_init(&pn->lruvec);
5046         pn->memcg = memcg;
5047
5048         memcg->nodeinfo[node] = pn;
5049         return 0;
5050 }
5051
5052 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5053 {
5054         struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5055
5056         if (!pn)
5057                 return;
5058
5059         free_percpu(pn->lruvec_stats_percpu);
5060         kfree(pn);
5061 }
5062
5063 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5064 {
5065         int node;
5066
5067         for_each_node(node)
5068                 free_mem_cgroup_per_node_info(memcg, node);
5069         free_percpu(memcg->vmstats_percpu);
5070         kfree(memcg);
5071 }
5072
5073 static void mem_cgroup_free(struct mem_cgroup *memcg)
5074 {
5075         memcg_wb_domain_exit(memcg);
5076         __mem_cgroup_free(memcg);
5077 }
5078
5079 static struct mem_cgroup *mem_cgroup_alloc(void)
5080 {
5081         struct mem_cgroup *memcg;
5082         int node;
5083         int __maybe_unused i;
5084         long error = -ENOMEM;
5085
5086         memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5087         if (!memcg)
5088                 return ERR_PTR(error);
5089
5090         memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5091                                  1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
5092         if (memcg->id.id < 0) {
5093                 error = memcg->id.id;
5094                 goto fail;
5095         }
5096
5097         memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5098                                                  GFP_KERNEL_ACCOUNT);
5099         if (!memcg->vmstats_percpu)
5100                 goto fail;
5101
5102         for_each_node(node)
5103                 if (alloc_mem_cgroup_per_node_info(memcg, node))
5104                         goto fail;
5105
5106         if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5107                 goto fail;
5108
5109         INIT_WORK(&memcg->high_work, high_work_func);
5110         INIT_LIST_HEAD(&memcg->oom_notify);
5111         mutex_init(&memcg->thresholds_lock);
5112         spin_lock_init(&memcg->move_lock);
5113         vmpressure_init(&memcg->vmpressure);
5114         INIT_LIST_HEAD(&memcg->event_list);
5115         spin_lock_init(&memcg->event_list_lock);
5116         memcg->socket_pressure = jiffies;
5117 #ifdef CONFIG_MEMCG_KMEM
5118         memcg->kmemcg_id = -1;
5119         INIT_LIST_HEAD(&memcg->objcg_list);
5120 #endif
5121 #ifdef CONFIG_CGROUP_WRITEBACK
5122         INIT_LIST_HEAD(&memcg->cgwb_list);
5123         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5124                 memcg->cgwb_frn[i].done =
5125                         __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5126 #endif
5127 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5128         spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5129         INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5130         memcg->deferred_split_queue.split_queue_len = 0;
5131 #endif
5132         idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5133         return memcg;
5134 fail:
5135         mem_cgroup_id_remove(memcg);
5136         __mem_cgroup_free(memcg);
5137         return ERR_PTR(error);
5138 }
5139
5140 static struct cgroup_subsys_state * __ref
5141 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5142 {
5143         struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5144         struct mem_cgroup *memcg, *old_memcg;
5145
5146         old_memcg = set_active_memcg(parent);
5147         memcg = mem_cgroup_alloc();
5148         set_active_memcg(old_memcg);
5149         if (IS_ERR(memcg))
5150                 return ERR_CAST(memcg);
5151
5152         page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5153         memcg->soft_limit = PAGE_COUNTER_MAX;
5154         page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5155         if (parent) {
5156                 memcg->swappiness = mem_cgroup_swappiness(parent);
5157                 memcg->oom_kill_disable = parent->oom_kill_disable;
5158
5159                 page_counter_init(&memcg->memory, &parent->memory);
5160                 page_counter_init(&memcg->swap, &parent->swap);
5161                 page_counter_init(&memcg->kmem, &parent->kmem);
5162                 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5163         } else {
5164                 page_counter_init(&memcg->memory, NULL);
5165                 page_counter_init(&memcg->swap, NULL);
5166                 page_counter_init(&memcg->kmem, NULL);
5167                 page_counter_init(&memcg->tcpmem, NULL);
5168
5169                 root_mem_cgroup = memcg;
5170                 return &memcg->css;
5171         }
5172
5173         if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5174                 static_branch_inc(&memcg_sockets_enabled_key);
5175
5176         return &memcg->css;
5177 }
5178
5179 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5180 {
5181         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5182
5183         if (memcg_online_kmem(memcg))
5184                 goto remove_id;
5185
5186         /*
5187          * A memcg must be visible for expand_shrinker_info()
5188          * by the time the maps are allocated. So, we allocate maps
5189          * here, when for_each_mem_cgroup() can't skip it.
5190          */
5191         if (alloc_shrinker_info(memcg))
5192                 goto offline_kmem;
5193
5194         /* Online state pins memcg ID, memcg ID pins CSS */
5195         refcount_set(&memcg->id.ref, 1);
5196         css_get(css);
5197
5198         if (unlikely(mem_cgroup_is_root(memcg)))
5199                 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5200                                    2UL*HZ);
5201         return 0;
5202 offline_kmem:
5203         memcg_offline_kmem(memcg);
5204 remove_id:
5205         mem_cgroup_id_remove(memcg);
5206         return -ENOMEM;
5207 }
5208
5209 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5210 {
5211         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5212         struct mem_cgroup_event *event, *tmp;
5213
5214         /*
5215          * Unregister events and notify userspace.
5216          * Notify userspace about the cgroup removal only after rmdir of the
5217          * cgroup directory to avoid a race between userspace and kernel space.
5218          */
5219         spin_lock_irq(&memcg->event_list_lock);
5220         list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5221                 list_del_init(&event->list);
5222                 schedule_work(&event->remove);
5223         }
5224         spin_unlock_irq(&memcg->event_list_lock);
5225
5226         page_counter_set_min(&memcg->memory, 0);
5227         page_counter_set_low(&memcg->memory, 0);
5228
5229         memcg_offline_kmem(memcg);
5230         reparent_shrinker_deferred(memcg);
5231         wb_memcg_offline(memcg);
5232
5233         drain_all_stock(memcg);
5234
5235         mem_cgroup_id_put(memcg);
5236 }
5237
5238 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5239 {
5240         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5241
5242         invalidate_reclaim_iterators(memcg);
5243 }
5244
5245 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5246 {
5247         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5248         int __maybe_unused i;
5249
5250 #ifdef CONFIG_CGROUP_WRITEBACK
5251         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5252                 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5253 #endif
5254         if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5255                 static_branch_dec(&memcg_sockets_enabled_key);
5256
5257         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5258                 static_branch_dec(&memcg_sockets_enabled_key);
5259
5260         vmpressure_cleanup(&memcg->vmpressure);
5261         cancel_work_sync(&memcg->high_work);
5262         mem_cgroup_remove_from_trees(memcg);
5263         free_shrinker_info(memcg);
5264         mem_cgroup_free(memcg);
5265 }
5266
5267 /**
5268  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5269  * @css: the target css
5270  *
5271  * Reset the states of the mem_cgroup associated with @css.  This is
5272  * invoked when the userland requests disabling on the default hierarchy
5273  * but the memcg is pinned through dependency.  The memcg should stop
5274  * applying policies and should revert to the vanilla state as it may be
5275  * made visible again.
5276  *
5277  * The current implementation only resets the essential configurations.
5278  * This needs to be expanded to cover all the visible parts.
5279  */
5280 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5281 {
5282         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5283
5284         page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5285         page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5286         page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5287         page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5288         page_counter_set_min(&memcg->memory, 0);
5289         page_counter_set_low(&memcg->memory, 0);
5290         page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5291         memcg->soft_limit = PAGE_COUNTER_MAX;
5292         page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5293         memcg_wb_domain_size_changed(memcg);
5294 }
5295
5296 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5297 {
5298         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5299         struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5300         struct memcg_vmstats_percpu *statc;
5301         long delta, v;
5302         int i, nid;
5303
5304         statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5305
5306         for (i = 0; i < MEMCG_NR_STAT; i++) {
5307                 /*
5308                  * Collect the aggregated propagation counts of groups
5309                  * below us. We're in a per-cpu loop here and this is
5310                  * a global counter, so the first cycle will get them.
5311                  */
5312                 delta = memcg->vmstats.state_pending[i];
5313                 if (delta)
5314                         memcg->vmstats.state_pending[i] = 0;
5315
5316                 /* Add CPU changes on this level since the last flush */
5317                 v = READ_ONCE(statc->state[i]);
5318                 if (v != statc->state_prev[i]) {
5319                         delta += v - statc->state_prev[i];
5320                         statc->state_prev[i] = v;
5321                 }
5322
5323                 if (!delta)
5324                         continue;
5325
5326                 /* Aggregate counts on this level and propagate upwards */
5327                 memcg->vmstats.state[i] += delta;
5328                 if (parent)
5329                         parent->vmstats.state_pending[i] += delta;
5330         }
5331
5332         for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
5333                 delta = memcg->vmstats.events_pending[i];
5334                 if (delta)
5335                         memcg->vmstats.events_pending[i] = 0;
5336
5337                 v = READ_ONCE(statc->events[i]);
5338                 if (v != statc->events_prev[i]) {
5339                         delta += v - statc->events_prev[i];
5340                         statc->events_prev[i] = v;
5341                 }
5342
5343                 if (!delta)
5344                         continue;
5345
5346                 memcg->vmstats.events[i] += delta;
5347                 if (parent)
5348                         parent->vmstats.events_pending[i] += delta;
5349         }
5350
5351         for_each_node_state(nid, N_MEMORY) {
5352                 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5353                 struct mem_cgroup_per_node *ppn = NULL;
5354                 struct lruvec_stats_percpu *lstatc;
5355
5356                 if (parent)
5357                         ppn = parent->nodeinfo[nid];
5358
5359                 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5360
5361                 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5362                         delta = pn->lruvec_stats.state_pending[i];
5363                         if (delta)
5364                                 pn->lruvec_stats.state_pending[i] = 0;
5365
5366                         v = READ_ONCE(lstatc->state[i]);
5367                         if (v != lstatc->state_prev[i]) {
5368                                 delta += v - lstatc->state_prev[i];
5369                                 lstatc->state_prev[i] = v;
5370                         }
5371
5372                         if (!delta)
5373                                 continue;
5374
5375                         pn->lruvec_stats.state[i] += delta;
5376                         if (ppn)
5377                                 ppn->lruvec_stats.state_pending[i] += delta;
5378                 }
5379         }
5380 }
5381
5382 #ifdef CONFIG_MMU
5383 /* Handlers for move charge at task migration. */
5384 static int mem_cgroup_do_precharge(unsigned long count)
5385 {
5386         int ret;
5387
5388         /* Try a single bulk charge without reclaim first, kswapd may wake */
5389         ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5390         if (!ret) {
5391                 mc.precharge += count;
5392                 return ret;
5393         }
5394
5395         /* Try charges one by one with reclaim, but do not retry */
5396         while (count--) {
5397                 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5398                 if (ret)
5399                         return ret;
5400                 mc.precharge++;
5401                 cond_resched();
5402         }
5403         return 0;
5404 }
5405
5406 union mc_target {
5407         struct page     *page;
5408         swp_entry_t     ent;
5409 };
5410
5411 enum mc_target_type {
5412         MC_TARGET_NONE = 0,
5413         MC_TARGET_PAGE,
5414         MC_TARGET_SWAP,
5415         MC_TARGET_DEVICE,
5416 };
5417
5418 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5419                                                 unsigned long addr, pte_t ptent)
5420 {
5421         struct page *page = vm_normal_page(vma, addr, ptent);
5422
5423         if (!page || !page_mapped(page))
5424                 return NULL;
5425         if (PageAnon(page)) {
5426                 if (!(mc.flags & MOVE_ANON))
5427                         return NULL;
5428         } else {
5429                 if (!(mc.flags & MOVE_FILE))
5430                         return NULL;
5431         }
5432         if (!get_page_unless_zero(page))
5433                 return NULL;
5434
5435         return page;
5436 }
5437
5438 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5439 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5440                         pte_t ptent, swp_entry_t *entry)
5441 {
5442         struct page *page = NULL;
5443         swp_entry_t ent = pte_to_swp_entry(ptent);
5444
5445         if (!(mc.flags & MOVE_ANON))
5446                 return NULL;
5447
5448         /*
5449          * Handle device private pages that are not accessible by the CPU, but
5450          * stored as special swap entries in the page table.
5451          */
5452         if (is_device_private_entry(ent)) {
5453                 page = pfn_swap_entry_to_page(ent);
5454                 if (!get_page_unless_zero(page))
5455                         return NULL;
5456                 return page;
5457         }
5458
5459         if (non_swap_entry(ent))
5460                 return NULL;
5461
5462         /*
5463          * Because lookup_swap_cache() updates some statistics counter,
5464          * we call find_get_page() with swapper_space directly.
5465          */
5466         page = find_get_page(swap_address_space(ent), swp_offset(ent));
5467         entry->val = ent.val;
5468
5469         return page;
5470 }
5471 #else
5472 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5473                         pte_t ptent, swp_entry_t *entry)
5474 {
5475         return NULL;
5476 }
5477 #endif
5478
5479 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5480                         unsigned long addr, pte_t ptent)
5481 {
5482         if (!vma->vm_file) /* anonymous vma */
5483                 return NULL;
5484         if (!(mc.flags & MOVE_FILE))
5485                 return NULL;
5486
5487         /* The page is moved even if it's not in this task's RSS (not faulted in). */
5488         /* shmem/tmpfs may have the page swapped out: account for that too. */
5489         return find_get_incore_page(vma->vm_file->f_mapping,
5490                         linear_page_index(vma, addr));
5491 }
5492
5493 /**
5494  * mem_cgroup_move_account - move account of the page
5495  * @page: the page
5496  * @compound: charge the page as compound or small page
5497  * @from: mem_cgroup which the page is moved from.
5498  * @to: mem_cgroup which the page is moved to. @from != @to.
5499  *
5500  * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful).
5501  *
5502  * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5503  * from old cgroup.
5504  */
5505 static int mem_cgroup_move_account(struct page *page,
5506                                    bool compound,
5507                                    struct mem_cgroup *from,
5508                                    struct mem_cgroup *to)
5509 {
5510         struct folio *folio = page_folio(page);
5511         struct lruvec *from_vec, *to_vec;
5512         struct pglist_data *pgdat;
5513         unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5514         int nid, ret;
5515
5516         VM_BUG_ON(from == to);
5517         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5518         VM_BUG_ON(compound && !folio_test_large(folio));
5519
5520         /*
5521          * Prevent mem_cgroup_migrate() from looking at the
5522          * folio's memory cgroup while we change it.
5523          */
5524         ret = -EBUSY;
5525         if (!folio_trylock(folio))
5526                 goto out;
5527
5528         ret = -EINVAL;
5529         if (folio_memcg(folio) != from)
5530                 goto out_unlock;
5531
5532         pgdat = folio_pgdat(folio);
5533         from_vec = mem_cgroup_lruvec(from, pgdat);
5534         to_vec = mem_cgroup_lruvec(to, pgdat);
5535
5536         folio_memcg_lock(folio);
5537
5538         if (folio_test_anon(folio)) {
5539                 if (folio_mapped(folio)) {
5540                         __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5541                         __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5542                         if (folio_test_transhuge(folio)) {
5543                                 __mod_lruvec_state(from_vec, NR_ANON_THPS,
5544                                                    -nr_pages);
5545                                 __mod_lruvec_state(to_vec, NR_ANON_THPS,
5546                                                    nr_pages);
5547                         }
5548                 }
5549         } else {
5550                 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5551                 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5552
5553                 if (folio_test_swapbacked(folio)) {
5554                         __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5555                         __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5556                 }
5557
5558                 if (folio_mapped(folio)) {
5559                         __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5560                         __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5561                 }
5562
5563                 if (folio_test_dirty(folio)) {
5564                         struct address_space *mapping = folio_mapping(folio);
5565
5566                         if (mapping_can_writeback(mapping)) {
5567                                 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5568                                                    -nr_pages);
5569                                 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5570                                                    nr_pages);
5571                         }
5572                 }
5573         }
5574
5575         if (folio_test_writeback(folio)) {
5576                 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5577                 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5578         }
5579
5580         /*
5581          * All state has been migrated, let's switch to the new memcg.
5582          *
5583          * It is safe to change page's memcg here because the page
5584          * is referenced, charged, isolated, and locked: we can't race
5585          * with (un)charging, migration, LRU putback, or anything else
5586          * that would rely on a stable page's memory cgroup.
5587          *
5588          * Note that lock_page_memcg is a memcg lock, not a page lock,
5589          * to save space. As soon as we switch page's memory cgroup to a
5590          * new memcg that isn't locked, the above state can change
5591          * concurrently again. Make sure we're truly done with it.
5592          */
5593         smp_mb();
5594
5595         css_get(&to->css);
5596         css_put(&from->css);
5597
5598         folio->memcg_data = (unsigned long)to;
5599
5600         __folio_memcg_unlock(from);
5601
5602         ret = 0;
5603         nid = folio_nid(folio);
5604
5605         local_irq_disable();
5606         mem_cgroup_charge_statistics(to, nr_pages);
5607         memcg_check_events(to, nid);
5608         mem_cgroup_charge_statistics(from, -nr_pages);
5609         memcg_check_events(from, nid);
5610         local_irq_enable();
5611 out_unlock:
5612         folio_unlock(folio);
5613 out:
5614         return ret;
5615 }
5616
5617 /**
5618  * get_mctgt_type - get target type of moving charge
5619  * @vma: the vma the pte to be checked belongs to
5620  * @addr: the address corresponding to the pte to be checked
5621  * @ptent: the pte to be checked
5622  * @target: the pointer where the target page or swap entry will be stored (can be NULL)
5623  *
5624  * Returns
5625  *   0 (MC_TARGET_NONE): if the pte is not a target for move charge.
5626  *   1 (MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5627  *     move charge.  If @target is not NULL, the page is stored in target->page
5628  *     with an extra refcount taken (callers should handle it).
5629  *   2 (MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5630  *     target for charge migration.  If @target is not NULL, the entry is stored
5631  *     in target->ent.
5632  *   3 (MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PRIVATE
5633  *     (so a ZONE_DEVICE page and thus not on the LRU).
5634  *     For now such a page is charged like a regular page would be, as for all
5635  *     intents and purposes it is just special memory taking the place of a
5636  *     regular page.
5637  *
5638  *     See Documentation/vm/hmm.txt and include/linux/hmm.h
5639  *
5640  * Called with pte lock held.
5641  */
5642
5643 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5644                 unsigned long addr, pte_t ptent, union mc_target *target)
5645 {
5646         struct page *page = NULL;
5647         enum mc_target_type ret = MC_TARGET_NONE;
5648         swp_entry_t ent = { .val = 0 };
5649
5650         if (pte_present(ptent))
5651                 page = mc_handle_present_pte(vma, addr, ptent);
5652         else if (is_swap_pte(ptent))
5653                 page = mc_handle_swap_pte(vma, ptent, &ent);
5654         else if (pte_none(ptent))
5655                 page = mc_handle_file_pte(vma, addr, ptent);
5656
5657         if (!page && !ent.val)
5658                 return ret;
5659         if (page) {
5660                 /*
5661                  * Do only a loose check without serialization.
5662                  * mem_cgroup_move_account() checks whether the page is valid
5663                  * under LRU exclusion.
5664                  */
5665                 if (page_memcg(page) == mc.from) {
5666                         ret = MC_TARGET_PAGE;
5667                         if (is_device_private_page(page))
5668                                 ret = MC_TARGET_DEVICE;
5669                         if (target)
5670                                 target->page = page;
5671                 }
5672                 if (!ret || !target)
5673                         put_page(page);
5674         }
5675         /*
5676          * There is a swap entry and a page doesn't exist or isn't charged.
5677          * But we cannot move a tail-page in a THP.
5678          */
5679         if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5680             mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5681                 ret = MC_TARGET_SWAP;
5682                 if (target)
5683                         target->ent = ent;
5684         }
5685         return ret;
5686 }
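/*
 * Minimal caller sketch (illustrative; the real users are the precharge
 * and move-charge walkers below): the reference taken for MC_TARGET_PAGE
 * and MC_TARGET_DEVICE is the caller's to drop once it is done with the
 * page.
 *
 *	union mc_target target;
 *
 *	switch (get_mctgt_type(vma, addr, ptent, &target)) {
 *	case MC_TARGET_PAGE:
 *	case MC_TARGET_DEVICE:
 *		// ... move or inspect target.page ...
 *		put_page(target.page);	// drop the extra reference
 *		break;
 *	case MC_TARGET_SWAP:
 *		// ... account target.ent ...
 *		break;
 *	default:
 *		break;
 *	}
 */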
5687
5688 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5689 /*
5690  * We don't consider PMD mapped swapping or file mapped pages because THP does
5691  * not support them for now.
5692  * Caller should make sure that pmd_trans_huge(pmd) is true.
5693  */
5694 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5695                 unsigned long addr, pmd_t pmd, union mc_target *target)
5696 {
5697         struct page *page = NULL;
5698         enum mc_target_type ret = MC_TARGET_NONE;
5699
5700         if (unlikely(is_swap_pmd(pmd))) {
5701                 VM_BUG_ON(thp_migration_supported() &&
5702                                   !is_pmd_migration_entry(pmd));
5703                 return ret;
5704         }
5705         page = pmd_page(pmd);
5706         VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5707         if (!(mc.flags & MOVE_ANON))
5708                 return ret;
5709         if (page_memcg(page) == mc.from) {
5710                 ret = MC_TARGET_PAGE;
5711                 if (target) {
5712                         get_page(page);
5713                         target->page = page;
5714                 }
5715         }
5716         return ret;
5717 }
5718 #else
5719 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5720                 unsigned long addr, pmd_t pmd, union mc_target *target)
5721 {
5722         return MC_TARGET_NONE;
5723 }
5724 #endif
5725
5726 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5727                                         unsigned long addr, unsigned long end,
5728                                         struct mm_walk *walk)
5729 {
5730         struct vm_area_struct *vma = walk->vma;
5731         pte_t *pte;
5732         spinlock_t *ptl;
5733
5734         ptl = pmd_trans_huge_lock(pmd, vma);
5735         if (ptl) {
5736                 /*
5737                  * Note there cannot be MC_TARGET_DEVICE for now as we do not
5738                  * support transparent huge pages with MEMORY_DEVICE_PRIVATE,
5739                  * but this might change.
5740                  */
5741                 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5742                         mc.precharge += HPAGE_PMD_NR;
5743                 spin_unlock(ptl);
5744                 return 0;
5745         }
5746
5747         if (pmd_trans_unstable(pmd))
5748                 return 0;
5749         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5750         for (; addr != end; pte++, addr += PAGE_SIZE)
5751                 if (get_mctgt_type(vma, addr, *pte, NULL))
5752                         mc.precharge++; /* increment precharge temporarily */
5753         pte_unmap_unlock(pte - 1, ptl);
5754         cond_resched();
5755
5756         return 0;
5757 }
5758
5759 static const struct mm_walk_ops precharge_walk_ops = {
5760         .pmd_entry      = mem_cgroup_count_precharge_pte_range,
5761 };
5762
5763 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5764 {
5765         unsigned long precharge;
5766
5767         mmap_read_lock(mm);
5768         walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5769         mmap_read_unlock(mm);
5770
5771         precharge = mc.precharge;
5772         mc.precharge = 0;
5773
5774         return precharge;
5775 }
5776
5777 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5778 {
5779         unsigned long precharge = mem_cgroup_count_precharge(mm);
5780
5781         VM_BUG_ON(mc.moving_task);
5782         mc.moving_task = current;
5783         return mem_cgroup_do_precharge(precharge);
5784 }
5785
5786 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5787 static void __mem_cgroup_clear_mc(void)
5788 {
5789         struct mem_cgroup *from = mc.from;
5790         struct mem_cgroup *to = mc.to;
5791
5792         /* we must uncharge all the leftover precharges from mc.to */
5793         if (mc.precharge) {
5794                 cancel_charge(mc.to, mc.precharge);
5795                 mc.precharge = 0;
5796         }
5797         /*
5798          * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5799          * we must uncharge here.
5800          */
5801         if (mc.moved_charge) {
5802                 cancel_charge(mc.from, mc.moved_charge);
5803                 mc.moved_charge = 0;
5804         }
5805         /* we must fixup refcnts and charges */
5806         if (mc.moved_swap) {
5807                 /* uncharge swap account from the old cgroup */
5808                 if (!mem_cgroup_is_root(mc.from))
5809                         page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5810
5811                 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5812
5813                 /*
5814                  * we charged both to->memory and to->memsw, so we
5815                  * should uncharge to->memory.
5816                  */
5817                 if (!mem_cgroup_is_root(mc.to))
5818                         page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5819
5820                 mc.moved_swap = 0;
5821         }
5822         memcg_oom_recover(from);
5823         memcg_oom_recover(to);
5824         wake_up_all(&mc.waitq);
5825 }
5826
5827 static void mem_cgroup_clear_mc(void)
5828 {
5829         struct mm_struct *mm = mc.mm;
5830
5831         /*
5832          * we must clear moving_task before waking up waiters at the end of
5833          * task migration.
5834          */
5835         mc.moving_task = NULL;
5836         __mem_cgroup_clear_mc();
5837         spin_lock(&mc.lock);
5838         mc.from = NULL;
5839         mc.to = NULL;
5840         mc.mm = NULL;
5841         spin_unlock(&mc.lock);
5842
5843         mmput(mm);
5844 }
5845
5846 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5847 {
5848         struct cgroup_subsys_state *css;
5849         struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5850         struct mem_cgroup *from;
5851         struct task_struct *leader, *p;
5852         struct mm_struct *mm;
5853         unsigned long move_flags;
5854         int ret = 0;
5855
5856         /* charge immigration isn't supported on the default hierarchy */
5857         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5858                 return 0;
5859
5860         /*
5861          * Multi-process migrations only happen on the default hierarchy
5862          * where charge immigration is not used.  Perform charge
5863          * immigration if @tset contains a leader and whine if there are
5864          * multiple.
5865          */
5866         p = NULL;
5867         cgroup_taskset_for_each_leader(leader, css, tset) {
5868                 WARN_ON_ONCE(p);
5869                 p = leader;
5870                 memcg = mem_cgroup_from_css(css);
5871         }
5872         if (!p)
5873                 return 0;
5874
5875         /*
5876          * We are now committed to this value whatever it is. Changes in this
5877          * tunable will only affect upcoming migrations, not the current one.
5878          * So we save it here and use the saved value for this migration.
5879          */
5880         move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5881         if (!move_flags)
5882                 return 0;
5883
5884         from = mem_cgroup_from_task(p);
5885
5886         VM_BUG_ON(from == memcg);
5887
5888         mm = get_task_mm(p);
5889         if (!mm)
5890                 return 0;
5891         /* We move charges only when we move the owner of the mm */
5892         if (mm->owner == p) {
5893                 VM_BUG_ON(mc.from);
5894                 VM_BUG_ON(mc.to);
5895                 VM_BUG_ON(mc.precharge);
5896                 VM_BUG_ON(mc.moved_charge);
5897                 VM_BUG_ON(mc.moved_swap);
5898
5899                 spin_lock(&mc.lock);
5900                 mc.mm = mm;
5901                 mc.from = from;
5902                 mc.to = memcg;
5903                 mc.flags = move_flags;
5904                 spin_unlock(&mc.lock);
5905                 /* We set mc.moving_task later */
5906
5907                 ret = mem_cgroup_precharge_mc(mm);
5908                 if (ret)
5909                         mem_cgroup_clear_mc();
5910         } else {
5911                 mmput(mm);
5912         }
5913         return ret;
5914 }
5915
5916 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5917 {
5918         if (mc.to)
5919                 mem_cgroup_clear_mc();
5920 }
5921
5922 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5923                                 unsigned long addr, unsigned long end,
5924                                 struct mm_walk *walk)
5925 {
5926         int ret = 0;
5927         struct vm_area_struct *vma = walk->vma;
5928         pte_t *pte;
5929         spinlock_t *ptl;
5930         enum mc_target_type target_type;
5931         union mc_target target;
5932         struct page *page;
5933
5934         ptl = pmd_trans_huge_lock(pmd, vma);
5935         if (ptl) {
5936                 if (mc.precharge < HPAGE_PMD_NR) {
5937                         spin_unlock(ptl);
5938                         return 0;
5939                 }
5940                 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
5941                 if (target_type == MC_TARGET_PAGE) {
5942                         page = target.page;
5943                         if (!isolate_lru_page(page)) {
5944                                 if (!mem_cgroup_move_account(page, true,
5945                                                              mc.from, mc.to)) {
5946                                         mc.precharge -= HPAGE_PMD_NR;
5947                                         mc.moved_charge += HPAGE_PMD_NR;
5948                                 }
5949                                 putback_lru_page(page);
5950                         }
5951                         put_page(page);
5952                 } else if (target_type == MC_TARGET_DEVICE) {
5953                         page = target.page;
5954                         if (!mem_cgroup_move_account(page, true,
5955                                                      mc.from, mc.to)) {
5956                                 mc.precharge -= HPAGE_PMD_NR;
5957                                 mc.moved_charge += HPAGE_PMD_NR;
5958                         }
5959                         put_page(page);
5960                 }
5961                 spin_unlock(ptl);
5962                 return 0;
5963         }
5964
5965         if (pmd_trans_unstable(pmd))
5966                 return 0;
5967 retry:
5968         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5969         for (; addr != end; addr += PAGE_SIZE) {
5970                 pte_t ptent = *(pte++);
5971                 bool device = false;
5972                 swp_entry_t ent;
5973
5974                 if (!mc.precharge)
5975                         break;
5976
5977                 switch (get_mctgt_type(vma, addr, ptent, &target)) {
5978                 case MC_TARGET_DEVICE:
5979                         device = true;
5980                         fallthrough;
5981                 case MC_TARGET_PAGE:
5982                         page = target.page;
5983                         /*
5984                          * We can see part of a split pmd here. Moving it
5985                          * could be done, but it would be too convoluted, so simply
5986                          * ignore such a partial THP and keep it in the original
5987                          * memcg. There should be somebody mapping the head page.
5988                          */
5989                         if (PageTransCompound(page))
5990                                 goto put;
5991                         if (!device && isolate_lru_page(page))
5992                                 goto put;
5993                         if (!mem_cgroup_move_account(page, false,
5994                                                 mc.from, mc.to)) {
5995                                 mc.precharge--;
5996                                 /* we uncharge from mc.from later. */
5997                                 mc.moved_charge++;
5998                         }
5999                         if (!device)
6000                                 putback_lru_page(page);
6001 put:                    /* get_mctgt_type() gets the page */
6002                         put_page(page);
6003                         break;
6004                 case MC_TARGET_SWAP:
6005                         ent = target.ent;
6006                         if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6007                                 mc.precharge--;
6008                                 mem_cgroup_id_get_many(mc.to, 1);
6009                                 /* we fixup other refcnts and charges later. */
6010                                 mc.moved_swap++;
6011                         }
6012                         break;
6013                 default:
6014                         break;
6015                 }
6016         }
6017         pte_unmap_unlock(pte - 1, ptl);
6018         cond_resched();
6019
6020         if (addr != end) {
6021                 /*
6022                  * We have consumed all precharges we got in can_attach().
6023                  * We try to charge one page at a time, but once a charge
6024                  * has failed during the attach() phase, we make no further
6025                  * charges to mc.to.
6026                  */
6027                 ret = mem_cgroup_do_precharge(1);
6028                 if (!ret)
6029                         goto retry;
6030         }
6031
6032         return ret;
6033 }
6034
6035 static const struct mm_walk_ops charge_walk_ops = {
6036         .pmd_entry      = mem_cgroup_move_charge_pte_range,
6037 };
6038
6039 static void mem_cgroup_move_charge(void)
6040 {
6041         lru_add_drain_all();
6042         /*
6043          * Signal lock_page_memcg() to take the memcg's move_lock
6044          * while we're moving its pages to another memcg. Then wait
6045          * for already started RCU-only updates to finish.
6046          */
6047         atomic_inc(&mc.from->moving_account);
6048         synchronize_rcu();
6049 retry:
6050         if (unlikely(!mmap_read_trylock(mc.mm))) {
6051                 /*
6052                  * Someone who is holding the mmap_lock might be waiting on
6053                  * the waitq. So we cancel all extra charges, wake up all waiters,
6054                  * and retry. Because we cancel the precharges, we might not be
6055                  * able to move all of the charges, but charge moving is a
6056                  * best-effort feature anyway, so it wouldn't be a big problem.
6057                  */
6058                 __mem_cgroup_clear_mc();
6059                 cond_resched();
6060                 goto retry;
6061         }
6062         /*
6063          * When we have consumed all precharges and fail to make an
6064          * additional charge, the page walk simply aborts.
6065          */
6066         walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6067                         NULL);
6068
6069         mmap_read_unlock(mc.mm);
6070         atomic_dec(&mc.from->moving_account);
6071 }
6072
6073 static void mem_cgroup_move_task(void)
6074 {
6075         if (mc.to) {
6076                 mem_cgroup_move_charge();
6077                 mem_cgroup_clear_mc();
6078         }
6079 }
6080 #else   /* !CONFIG_MMU */
6081 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6082 {
6083         return 0;
6084 }
6085 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6086 {
6087 }
6088 static void mem_cgroup_move_task(void)
6089 {
6090 }
6091 #endif
6092
6093 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6094 {
6095         if (value == PAGE_COUNTER_MAX)
6096                 seq_puts(m, "max\n");
6097         else
6098                 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6099
6100         return 0;
6101 }
6102
6103 static u64 memory_current_read(struct cgroup_subsys_state *css,
6104                                struct cftype *cft)
6105 {
6106         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6107
6108         return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6109 }
6110
6111 static int memory_min_show(struct seq_file *m, void *v)
6112 {
6113         return seq_puts_memcg_tunable(m,
6114                 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6115 }
6116
6117 static ssize_t memory_min_write(struct kernfs_open_file *of,
6118                                 char *buf, size_t nbytes, loff_t off)
6119 {
6120         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6121         unsigned long min;
6122         int err;
6123
6124         buf = strstrip(buf);
6125         err = page_counter_memparse(buf, "max", &min);
6126         if (err)
6127                 return err;
6128
6129         page_counter_set_min(&memcg->memory, min);
6130
6131         return nbytes;
6132 }
6133
6134 static int memory_low_show(struct seq_file *m, void *v)
6135 {
6136         return seq_puts_memcg_tunable(m,
6137                 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6138 }
6139
6140 static ssize_t memory_low_write(struct kernfs_open_file *of,
6141                                 char *buf, size_t nbytes, loff_t off)
6142 {
6143         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6144         unsigned long low;
6145         int err;
6146
6147         buf = strstrip(buf);
6148         err = page_counter_memparse(buf, "max", &low);
6149         if (err)
6150                 return err;
6151
6152         page_counter_set_low(&memcg->memory, low);
6153
6154         return nbytes;
6155 }
6156
6157 static int memory_high_show(struct seq_file *m, void *v)
6158 {
6159         return seq_puts_memcg_tunable(m,
6160                 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6161 }
6162
6163 static ssize_t memory_high_write(struct kernfs_open_file *of,
6164                                  char *buf, size_t nbytes, loff_t off)
6165 {
6166         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6167         unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6168         bool drained = false;
6169         unsigned long high;
6170         int err;
6171
6172         buf = strstrip(buf);
6173         err = page_counter_memparse(buf, "max", &high);
6174         if (err)
6175                 return err;
6176
6177         page_counter_set_high(&memcg->memory, high);
6178
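        /*
         * Try to push usage down to the new high: drain the per-cpu
         * stock once, then reclaim the excess, giving up when reclaim
         * makes no progress and MAX_RECLAIM_RETRIES attempts have been
         * exhausted, or when a signal is pending.
         */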
6179         for (;;) {
6180                 unsigned long nr_pages = page_counter_read(&memcg->memory);
6181                 unsigned long reclaimed;
6182
6183                 if (nr_pages <= high)
6184                         break;
6185
6186                 if (signal_pending(current))
6187                         break;
6188
6189                 if (!drained) {
6190                         drain_all_stock(memcg);
6191                         drained = true;
6192                         continue;
6193                 }
6194
6195                 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6196                                                          GFP_KERNEL, true);
6197
6198                 if (!reclaimed && !nr_retries--)
6199                         break;
6200         }
6201
6202         memcg_wb_domain_size_changed(memcg);
6203         return nbytes;
6204 }
6205
6206 static int memory_max_show(struct seq_file *m, void *v)
6207 {
6208         return seq_puts_memcg_tunable(m,
6209                 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6210 }
6211
6212 static ssize_t memory_max_write(struct kernfs_open_file *of,
6213                                 char *buf, size_t nbytes, loff_t off)
6214 {
6215         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6216         unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6217         bool drained = false;
6218         unsigned long max;
6219         int err;
6220
6221         buf = strstrip(buf);
6222         err = page_counter_memparse(buf, "max", &max);
6223         if (err)
6224                 return err;
6225
6226         xchg(&memcg->memory.max, max);
6227
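        /*
         * Shrink usage below the new limit: drain the per-cpu stock
         * once, then reclaim (tolerating up to MAX_RECLAIM_RETRIES
         * failed attempts), and finally invoke the OOM killer until
         * usage fits or no further victims can be killed.
         */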
6228         for (;;) {
6229                 unsigned long nr_pages = page_counter_read(&memcg->memory);
6230
6231                 if (nr_pages <= max)
6232                         break;
6233
6234                 if (signal_pending(current))
6235                         break;
6236
6237                 if (!drained) {
6238                         drain_all_stock(memcg);
6239                         drained = true;
6240                         continue;
6241                 }
6242
6243                 if (nr_reclaims) {
6244                         if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6245                                                           GFP_KERNEL, true))
6246                                 nr_reclaims--;
6247                         continue;
6248                 }
6249
6250                 memcg_memory_event(memcg, MEMCG_OOM);
6251                 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6252                         break;
6253         }
6254
6255         memcg_wb_domain_size_changed(memcg);
6256         return nbytes;
6257 }
6258
6259 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6260 {
6261         seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6262         seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6263         seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6264         seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6265         seq_printf(m, "oom_kill %lu\n",
6266                    atomic_long_read(&events[MEMCG_OOM_KILL]));
6267         seq_printf(m, "oom_group_kill %lu\n",
6268                    atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6269 }
6270
6271 static int memory_events_show(struct seq_file *m, void *v)
6272 {
6273         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6274
6275         __memory_events_show(m, memcg->memory_events);
6276         return 0;
6277 }
6278
6279 static int memory_events_local_show(struct seq_file *m, void *v)
6280 {
6281         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6282
6283         __memory_events_show(m, memcg->memory_events_local);
6284         return 0;
6285 }
6286
6287 static int memory_stat_show(struct seq_file *m, void *v)
6288 {
6289         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6290         char *buf;
6291
6292         buf = memory_stat_format(memcg);
6293         if (!buf)
6294                 return -ENOMEM;
6295         seq_puts(m, buf);
6296         kfree(buf);
6297         return 0;
6298 }
6299
6300 #ifdef CONFIG_NUMA
6301 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6302                                                      int item)
6303 {
6304         return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
6305 }
6306
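/*
 * memory.numa_stat: like memory.stat, but each node-level item is
 * broken down into per-NUMA-node " N<nid>=<value>" entries.
 */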
6307 static int memory_numa_stat_show(struct seq_file *m, void *v)
6308 {
6309         int i;
6310         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6311
6312         mem_cgroup_flush_stats();
6313
6314         for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6315                 int nid;
6316
6317                 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6318                         continue;
6319
6320                 seq_printf(m, "%s", memory_stats[i].name);
6321                 for_each_node_state(nid, N_MEMORY) {
6322                         u64 size;
6323                         struct lruvec *lruvec;
6324
6325                         lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6326                         size = lruvec_page_state_output(lruvec,
6327                                                         memory_stats[i].idx);
6328                         seq_printf(m, " N%d=%llu", nid, size);
6329                 }
6330                 seq_putc(m, '\n');
6331         }
6332
6333         return 0;
6334 }
6335 #endif
6336
6337 static int memory_oom_group_show(struct seq_file *m, void *v)
6338 {
6339         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6340
6341         seq_printf(m, "%d\n", memcg->oom_group);
6342
6343         return 0;
6344 }
6345
6346 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6347                                       char *buf, size_t nbytes, loff_t off)
6348 {
6349         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6350         int ret, oom_group;
6351
6352         buf = strstrip(buf);
6353         if (!buf)
6354                 return -EINVAL;
6355
6356         ret = kstrtoint(buf, 0, &oom_group);
6357         if (ret)
6358                 return ret;
6359
6360         if (oom_group != 0 && oom_group != 1)
6361                 return -EINVAL;
6362
6363         memcg->oom_group = oom_group;
6364
6365         return nbytes;
6366 }
6367
6368 static struct cftype memory_files[] = {
6369         {
6370                 .name = "current",
6371                 .flags = CFTYPE_NOT_ON_ROOT,
6372                 .read_u64 = memory_current_read,
6373         },
6374         {
6375                 .name = "min",
6376                 .flags = CFTYPE_NOT_ON_ROOT,
6377                 .seq_show = memory_min_show,
6378                 .write = memory_min_write,
6379         },
6380         {
6381                 .name = "low",
6382                 .flags = CFTYPE_NOT_ON_ROOT,
6383                 .seq_show = memory_low_show,
6384                 .write = memory_low_write,
6385         },
6386         {
6387                 .name = "high",
6388                 .flags = CFTYPE_NOT_ON_ROOT,
6389                 .seq_show = memory_high_show,
6390                 .write = memory_high_write,
6391         },
6392         {
6393                 .name = "max",
6394                 .flags = CFTYPE_NOT_ON_ROOT,
6395                 .seq_show = memory_max_show,
6396                 .write = memory_max_write,
6397         },
6398         {
6399                 .name = "events",
6400                 .flags = CFTYPE_NOT_ON_ROOT,
6401                 .file_offset = offsetof(struct mem_cgroup, events_file),
6402                 .seq_show = memory_events_show,
6403         },
6404         {
6405                 .name = "events.local",
6406                 .flags = CFTYPE_NOT_ON_ROOT,
6407                 .file_offset = offsetof(struct mem_cgroup, events_local_file),
6408                 .seq_show = memory_events_local_show,
6409         },
6410         {
6411                 .name = "stat",
6412                 .seq_show = memory_stat_show,
6413         },
6414 #ifdef CONFIG_NUMA
6415         {
6416                 .name = "numa_stat",
6417                 .seq_show = memory_numa_stat_show,
6418         },
6419 #endif
6420         {
6421                 .name = "oom.group",
6422                 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6423                 .seq_show = memory_oom_group_show,
6424                 .write = memory_oom_group_write,
6425         },
6426         { }     /* terminate */
6427 };
6428
6429 struct cgroup_subsys memory_cgrp_subsys = {
6430         .css_alloc = mem_cgroup_css_alloc,
6431         .css_online = mem_cgroup_css_online,
6432         .css_offline = mem_cgroup_css_offline,
6433         .css_released = mem_cgroup_css_released,
6434         .css_free = mem_cgroup_css_free,
6435         .css_reset = mem_cgroup_css_reset,
6436         .css_rstat_flush = mem_cgroup_css_rstat_flush,
6437         .can_attach = mem_cgroup_can_attach,
6438         .cancel_attach = mem_cgroup_cancel_attach,
6439         .post_attach = mem_cgroup_move_task,
6440         .dfl_cftypes = memory_files,
6441         .legacy_cftypes = mem_cgroup_legacy_files,
6442         .early_init = 0,
6443 };
6444
6445 /*
6446  * This function calculates an individual cgroup's effective
6447  * protection which is derived from its own memory.min/low, its
6448  * parent's and siblings' settings, as well as the actual memory
6449  * distribution in the tree.
6450  *
6451  * The following rules apply to the effective protection values:
6452  *
6453  * 1. At the first level of reclaim, effective protection is equal to
6454  *    the declared protection in memory.min and memory.low.
6455  *
6456  * 2. To enable safe delegation of the protection configuration, at
6457  *    subsequent levels the effective protection is capped to the
6458  *    parent's effective protection.
6459  *
6460  * 3. To make complex and dynamic subtrees easier to configure, the
6461  *    user is allowed to overcommit the declared protection at a given
6462  *    level. If that is the case, the parent's effective protection is
6463  *    distributed to the children in proportion to how much protection
6464  *    they have declared and how much of it they are utilizing.
6465  *
6466  *    This makes distribution proportional, but also work-conserving:
6467  *    if one cgroup claims much more protection than it uses memory,
6468  *    the unused remainder is available to its siblings.
6469  *
6470  * 4. Conversely, when the declared protection is undercommitted at a
6471  *    given level, the distribution of the larger parental protection
6472  *    budget is NOT proportional. A cgroup's protection from a sibling
6473  *    is capped to its own memory.min/low setting.
6474  *
6475  * 5. However, to allow protecting recursive subtrees from each other
6476  *    without having to declare each individual cgroup's fixed share
6477  *    of the ancestor's claim to protection, any unutilized -
6478  *    "floating" - protection from up the tree is distributed in
6479  *    proportion to each cgroup's *usage*. This makes the protection
6480  *    neutral wrt sibling cgroups and lets them compete freely over
6481  *    the shared parental protection budget, but it protects the
6482  *    subtree as a whole from neighboring subtrees.
6483  *
6484  * Note that 4. and 5. are not in conflict: 4. is about protecting
6485  * against immediate siblings whereas 5. is about protecting against
6486  * neighboring subtrees.
6487  */
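/*
 * Illustrative example of the overcommit case (rule 3 above), with
 * hypothetical numbers: a parent with an effective protection of 100
 * pages has two children that each declare and fully use 150 and 50
 * pages of protection (siblings_protected = 200 > parent_effective =
 * 100). The budget is distributed in proportion to utilization, so the
 * children end up with effective protections of 150 * 100 / 200 = 75
 * and 50 * 100 / 200 = 25 pages.
 */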
6488 static unsigned long effective_protection(unsigned long usage,
6489                                           unsigned long parent_usage,
6490                                           unsigned long setting,
6491                                           unsigned long parent_effective,
6492                                           unsigned long siblings_protected)
6493 {
6494         unsigned long protected;
6495         unsigned long ep;
6496
6497         protected = min(usage, setting);
6498         /*
6499          * If all cgroups at this level combined claim and use more
6500          * protection than what the parent affords them, distribute
6501          * shares in proportion to utilization.
6502          *
6503          * We are using actual utilization rather than the statically
6504          * claimed protection in order to be work-conserving: claimed
6505          * but unused protection is available to siblings that would
6506          * otherwise get a smaller chunk than what they claimed.
6507          */
6508         if (siblings_protected > parent_effective)
6509                 return protected * parent_effective / siblings_protected;
6510
6511         /*
6512          * Ok, utilized protection of all children is within what the
6513          * parent affords them, so we know whatever this child claims
6514          * and utilizes is effectively protected.
6515          *
6516          * If there is unprotected usage beyond this value, reclaim
6517          * will apply pressure in proportion to that amount.
6518          *
6519          * If there is unutilized protection, the cgroup will be fully
6520          * shielded from reclaim, but we do return a smaller value for
6521          * protection than what the group could enjoy in theory. This
6522          * is okay. With the overcommit distribution above, effective
6523          * protection is always dependent on how memory is actually
6524          * consumed among the siblings anyway.
6525          */
6526         ep = protected;
6527
6528         /*
6529          * If the children aren't claiming (all of) the protection
6530          * afforded to them by the parent, distribute the remainder in
6531          * proportion to the (unprotected) memory of each cgroup. That
6532          * way, cgroups that aren't explicitly prioritized wrt each
6533          * other compete freely over the allowance, but they are
6534          * collectively protected from neighboring trees.
6535          *
6536          * We're using unprotected memory for the weight so that if
6537          * some cgroups DO claim explicit protection, we don't protect
6538          * the same bytes twice.
6539          *
6540          * Check both usage and parent_usage against the respective
6541          * protected values. One should imply the other, but they
6542          * aren't read atomically - make sure the division is sane.
6543          */
6544         if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6545                 return ep;
6546         if (parent_effective > siblings_protected &&
6547             parent_usage > siblings_protected &&
6548             usage > protected) {
6549                 unsigned long unclaimed;
6550
6551                 unclaimed = parent_effective - siblings_protected;
6552                 unclaimed *= usage - protected;
6553                 unclaimed /= parent_usage - siblings_protected;
6554
6555                 ep += unclaimed;
6556         }
6557
6558         return ep;
6559 }
6560
6561 /**
6562  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
6563  * @root: the top ancestor of the sub-tree being checked
6564  * @memcg: the memory cgroup to check
6565  *
6566  * WARNING: This function is not stateless! It can only be used as part
6567  *          of a top-down tree iteration, not for isolated queries.
6568  */
6569 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6570                                      struct mem_cgroup *memcg)
6571 {
6572         unsigned long usage, parent_usage;
6573         struct mem_cgroup *parent;
6574
6575         if (mem_cgroup_disabled())
6576                 return;
6577
6578         if (!root)
6579                 root = root_mem_cgroup;
6580
6581         /*
6582          * Effective values of the reclaim targets are ignored so they
6583          * can be stale. Have a look at mem_cgroup_protection for more
6584          * details.
6585          * TODO: calculation should be more robust so that we do not need
6586          * that special casing.
6587          */
6588         if (memcg == root)
6589                 return;
6590
6591         usage = page_counter_read(&memcg->memory);
6592         if (!usage)
6593                 return;
6594
6595         parent = parent_mem_cgroup(memcg);
6596         /* No parent means a non-hierarchical mode on v1 memcg */
6597         if (!parent)
6598                 return;
6599
6600         if (parent == root) {
6601                 memcg->memory.emin = READ_ONCE(memcg->memory.min);
6602                 memcg->memory.elow = READ_ONCE(memcg->memory.low);
6603                 return;
6604         }
6605
6606         parent_usage = page_counter_read(&parent->memory);
6607
6608         WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6609                         READ_ONCE(memcg->memory.min),
6610                         READ_ONCE(parent->memory.emin),
6611                         atomic_long_read(&parent->memory.children_min_usage)));
6612
6613         WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6614                         READ_ONCE(memcg->memory.low),
6615                         READ_ONCE(parent->memory.elow),
6616                         atomic_long_read(&parent->memory.children_low_usage)));
6617 }
6618
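/*
 * Charge @folio to @memcg: try to charge the folio's pages, commit the
 * folio->memcg binding and account the charge statistics and events.
 */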
6619 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
6620                         gfp_t gfp)
6621 {
6622         long nr_pages = folio_nr_pages(folio);
6623         int ret;
6624
6625         ret = try_charge(memcg, gfp, nr_pages);
6626         if (ret)
6627                 goto out;
6628
6629         css_get(&memcg->css);
6630         commit_charge(folio, memcg);
6631
6632         local_irq_disable();
6633         mem_cgroup_charge_statistics(memcg, nr_pages);
6634         memcg_check_events(memcg, folio_nid(folio));
6635         local_irq_enable();
6636 out:
6637         return ret;
6638 }
6639
6640 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
6641 {
6642         struct mem_cgroup *memcg;
6643         int ret;
6644
6645         memcg = get_mem_cgroup_from_mm(mm);
6646         ret = charge_memcg(folio, memcg, gfp);
6647         css_put(&memcg->css);
6648
6649         return ret;
6650 }
6651
6652 /**
6653  * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin
6654  * @page: page to charge
6655  * @mm: mm context of the victim
6656  * @gfp: reclaim mode
6657  * @entry: swap entry for which the page is allocated
6658  *
6659  * This function charges a page allocated for swapin. Please call this before
6660  * adding the page to the swapcache.
6661  *
6662  * Returns 0 on success. Otherwise, an error code is returned.
6663  */
6664 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
6665                                   gfp_t gfp, swp_entry_t entry)
6666 {
6667         struct folio *folio = page_folio(page);
6668         struct mem_cgroup *memcg;
6669         unsigned short id;
6670         int ret;
6671
6672         if (mem_cgroup_disabled())
6673                 return 0;
6674
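        /*
         * Prefer the memcg recorded for the swap entry; if it is gone
         * or offline, fall back to the faulting mm's memcg.
         */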
6675         id = lookup_swap_cgroup_id(entry);
6676         rcu_read_lock();
6677         memcg = mem_cgroup_from_id(id);
6678         if (!memcg || !css_tryget_online(&memcg->css))
6679                 memcg = get_mem_cgroup_from_mm(mm);
6680         rcu_read_unlock();
6681
6682         ret = charge_memcg(folio, memcg, gfp);
6683
6684         css_put(&memcg->css);
6685         return ret;
6686 }
6687
6688 /*
6689  * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
6690  * @entry: swap entry for which the page is charged
6691  *
6692  * Call this function after successfully adding the charged page to swapcache.
6693  *
6694  * Note: This function assumes that the page whose swap slot is being
6695  * uncharged is an order-0 page.
6696  */
6697 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
6698 {
6699         /*
6700          * Cgroup1's unified memory+swap counter has been charged with the
6701          * new swapcache page; finish the transfer by uncharging the swap
6702          * slot. The swap slot would also get uncharged when it dies, but
6703          * it can stick around indefinitely and we'd count the page twice
6704          * the entire time.
6705          *
6706          * Cgroup2 has separate resource counters for memory and swap,
6707          * so this is a non-issue here. Memory and swap charge lifetimes
6708          * correspond 1:1 to page and swap slot lifetimes: we charge the
6709          * page to memory here, and uncharge swap when the slot is freed.
6710          */
6711         if (!mem_cgroup_disabled() && do_memsw_account()) {
6712                 /*
6713                  * The swap entry might not get freed for a long time,
6714                  * let's not wait for it.  The page already received a
6715                  * memory+swap charge, drop the swap entry duplicate.
6716                  */
6717                 mem_cgroup_uncharge_swap(entry, 1);
6718         }
6719 }
6720
6721 struct uncharge_gather {
6722         struct mem_cgroup *memcg;
6723         unsigned long nr_memory;
6724         unsigned long pgpgout;
6725         unsigned long nr_kmem;
6726         int nid;
6727 };
6728
6729 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6730 {
6731         memset(ug, 0, sizeof(*ug));
6732 }
6733
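/*
 * Flush one gathered batch: uncharge the accumulated pages from the
 * memcg's page counters, account kmem and PGPGOUT events, and drop the
 * css reference that uncharge_folio() took when starting the batch.
 */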
6734 static void uncharge_batch(const struct uncharge_gather *ug)
6735 {
6736         unsigned long flags;
6737
6738         if (ug->nr_memory) {
6739                 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
6740                 if (do_memsw_account())
6741                         page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
6742                 if (ug->nr_kmem)
6743                         memcg_account_kmem(ug->memcg, -ug->nr_kmem);
6744                 memcg_oom_recover(ug->memcg);
6745         }
6746
6747         local_irq_save(flags);
6748         __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6749         __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
6750         memcg_check_events(ug->memcg, ug->nid);
6751         local_irq_restore(flags);
6752
6753         /* drop reference from uncharge_folio */
6754         css_put(&ug->memcg->css);
6755 }
6756
6757 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
6758 {
6759         long nr_pages;
6760         struct mem_cgroup *memcg;
6761         struct obj_cgroup *objcg;
6762
6763         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
6764
6765         /*
6766          * Nobody should be changing or seriously looking at
6767          * the folio's memcg or objcg at this point; we have
6768          * fully exclusive access to the folio.
6769          */
6770         if (folio_memcg_kmem(folio)) {
6771                 objcg = __folio_objcg(folio);
6772                 /*
6773                  * This get matches the put at the end of the function and
6774                  * kmem pages do not hold memcg references anymore.
6775                  */
6776                 memcg = get_mem_cgroup_from_objcg(objcg);
6777         } else {
6778                 memcg = __folio_memcg(folio);
6779         }
6780
6781         if (!memcg)
6782                 return;
6783
6784         if (ug->memcg != memcg) {
6785                 if (ug->memcg) {
6786                         uncharge_batch(ug);
6787                         uncharge_gather_clear(ug);
6788                 }
6789                 ug->memcg = memcg;
6790                 ug->nid = folio_nid(folio);
6791
6792                 /* pairs with css_put in uncharge_batch */
6793                 css_get(&memcg->css);
6794         }
6795
6796         nr_pages = folio_nr_pages(folio);
6797
6798         if (folio_memcg_kmem(folio)) {
6799                 ug->nr_memory += nr_pages;
6800                 ug->nr_kmem += nr_pages;
6801
6802                 folio->memcg_data = 0;
6803                 obj_cgroup_put(objcg);
6804         } else {
6805                 /* LRU pages aren't accounted at the root level */
6806                 if (!mem_cgroup_is_root(memcg))
6807                         ug->nr_memory += nr_pages;
6808                 ug->pgpgout++;
6809
6810                 folio->memcg_data = 0;
6811         }
6812
6813         css_put(&memcg->css);
6814 }
6815
6816 void __mem_cgroup_uncharge(struct folio *folio)
6817 {
6818         struct uncharge_gather ug;
6819
6820         /* Don't touch folio->lru of any random page, pre-check: */
6821         if (!folio_memcg(folio))
6822                 return;
6823
6824         uncharge_gather_clear(&ug);
6825         uncharge_folio(folio, &ug);
6826         uncharge_batch(&ug);
6827 }
6828
6829 /**
6830  * __mem_cgroup_uncharge_list - uncharge a list of pages
6831  * @page_list: list of pages to uncharge
6832  *
6833  * Uncharge a list of pages previously charged with
6834  * __mem_cgroup_charge().
6835  */
6836 void __mem_cgroup_uncharge_list(struct list_head *page_list)
6837 {
6838         struct uncharge_gather ug;
6839         struct folio *folio;
6840
6841         uncharge_gather_clear(&ug);
6842         list_for_each_entry(folio, page_list, lru)
6843                 uncharge_folio(folio, &ug);
6844         if (ug.memcg)
6845                 uncharge_batch(&ug);
6846 }
6847
6848 /**
6849  * mem_cgroup_migrate - Charge a folio's replacement.
6850  * @old: Currently circulating folio.
6851  * @new: Replacement folio.
6852  *
6853  * Charge @new as a replacement folio for @old. @old will
6854  * be uncharged upon free.
6855  *
6856  * Both folios must be locked, @new->mapping must be set up.
6857  */
6858 void mem_cgroup_migrate(struct folio *old, struct folio *new)
6859 {
6860         struct mem_cgroup *memcg;
6861         long nr_pages = folio_nr_pages(new);
6862         unsigned long flags;
6863
6864         VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
6865         VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
6866         VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
6867         VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
6868
6869         if (mem_cgroup_disabled())
6870                 return;
6871
6872         /* Page cache replacement: new folio already charged? */
6873         if (folio_memcg(new))
6874                 return;
6875
6876         memcg = folio_memcg(old);
6877         VM_WARN_ON_ONCE_FOLIO(!memcg, old);
6878         if (!memcg)
6879                 return;
6880
6881         /* Force-charge the new page. The old one will be freed soon */
6882         if (!mem_cgroup_is_root(memcg)) {
6883                 page_counter_charge(&memcg->memory, nr_pages);
6884                 if (do_memsw_account())
6885                         page_counter_charge(&memcg->memsw, nr_pages);
6886         }
6887
6888         css_get(&memcg->css);
6889         commit_charge(new, memcg);
6890
6891         local_irq_save(flags);
6892         mem_cgroup_charge_statistics(memcg, nr_pages);
6893         memcg_check_events(memcg, folio_nid(new));
6894         local_irq_restore(flags);
6895 }
6896
6897 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
6898 EXPORT_SYMBOL(memcg_sockets_enabled_key);
6899
6900 void mem_cgroup_sk_alloc(struct sock *sk)
6901 {
6902         struct mem_cgroup *memcg;
6903
6904         if (!mem_cgroup_sockets_enabled)
6905                 return;
6906
6907         /* Do not associate the sock with an unrelated interrupted task's memcg. */
6908         if (!in_task())
6909                 return;
6910
6911         rcu_read_lock();
6912         memcg = mem_cgroup_from_task(current);
6913         if (memcg == root_mem_cgroup)
6914                 goto out;
6915         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
6916                 goto out;
6917         if (css_tryget(&memcg->css))
6918                 sk->sk_memcg = memcg;
6919 out:
6920         rcu_read_unlock();
6921 }
6922
6923 void mem_cgroup_sk_free(struct sock *sk)
6924 {
6925         if (sk->sk_memcg)
6926                 css_put(&sk->sk_memcg->css);
6927 }
6928
6929 /**
6930  * mem_cgroup_charge_skmem - charge socket memory
6931  * @memcg: memcg to charge
6932  * @nr_pages: number of pages to charge
6933  * @gfp_mask: reclaim mode
6934  *
6935  * Charges @nr_pages to @memcg. Returns %true if the charge fits within
6936  * @memcg's configured limit, %false if it does not.
6937  */
6938 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
6939                              gfp_t gfp_mask)
6940 {
6941         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6942                 struct page_counter *fail;
6943
6944                 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
6945                         memcg->tcpmem_pressure = 0;
6946                         return true;
6947                 }
6948                 memcg->tcpmem_pressure = 1;
6949                 if (gfp_mask & __GFP_NOFAIL) {
6950                         page_counter_charge(&memcg->tcpmem, nr_pages);
6951                         return true;
6952                 }
6953                 return false;
6954         }
6955
6956         if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
6957                 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
6958                 return true;
6959         }
6960
6961         return false;
6962 }
6963
6964 /**
6965  * mem_cgroup_uncharge_skmem - uncharge socket memory
6966  * @memcg: memcg to uncharge
6967  * @nr_pages: number of pages to uncharge
6968  */
6969 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6970 {
6971         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6972                 page_counter_uncharge(&memcg->tcpmem, nr_pages);
6973                 return;
6974         }
6975
6976         mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
6977
6978         refill_stock(memcg, nr_pages);
6979 }
6980
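/*
 * Parse the "cgroup.memory=" boot option. Comma-separated tokens:
 * "nosocket" disables socket memory accounting, "nokmem" disables
 * kernel memory accounting.
 */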
6981 static int __init cgroup_memory(char *s)
6982 {
6983         char *token;
6984
6985         while ((token = strsep(&s, ",")) != NULL) {
6986                 if (!*token)
6987                         continue;
6988                 if (!strcmp(token, "nosocket"))
6989                         cgroup_memory_nosocket = true;
6990                 if (!strcmp(token, "nokmem"))
6991                         cgroup_memory_nokmem = true;
6992         }
6993         return 1;
6994 }
6995 __setup("cgroup.memory=", cgroup_memory);
6996
6997 /*
6998  * subsys_initcall() for memory controller.
6999  *
7000  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
7001  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
7002  * basically everything that doesn't depend on a specific mem_cgroup structure
7003  * should be initialized from here.
7004  */
7005 static int __init mem_cgroup_init(void)
7006 {
7007         int cpu, node;
7008
7009         /*
7010          * Currently an s32 type (see struct batched_lruvec_stat) is used
7011          * for per-memcg-per-cpu caching of per-node statistics. For this to
7012          * work correctly, we must make sure that the overfill threshold
7013          * can't exceed S32_MAX / PAGE_SIZE.
7014          */
7015         BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7016
7017         cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7018                                   memcg_hotplug_cpu_dead);
7019
7020         for_each_possible_cpu(cpu)
7021                 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7022                           drain_local_stock);
7023
7024         for_each_node(node) {
7025                 struct mem_cgroup_tree_per_node *rtpn;
7026
7027                 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7028                                     node_online(node) ? node : NUMA_NO_NODE);
7029
7030                 rtpn->rb_root = RB_ROOT;
7031                 rtpn->rb_rightmost = NULL;
7032                 spin_lock_init(&rtpn->lock);
7033                 soft_limit_tree.rb_tree_per_node[node] = rtpn;
7034         }
7035
7036         return 0;
7037 }
7038 subsys_initcall(mem_cgroup_init);
7039
7040 #ifdef CONFIG_MEMCG_SWAP
7041 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7042 {
7043         while (!refcount_inc_not_zero(&memcg->id.ref)) {
7044                 /*
7045                  * The root cgroup cannot be destroyed, so its refcount must
7046                  * always be >= 1.
7047                  */
7048                 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7049                         VM_BUG_ON(1);
7050                         break;
7051                 }
7052                 memcg = parent_mem_cgroup(memcg);
7053                 if (!memcg)
7054                         memcg = root_mem_cgroup;
7055         }
7056         return memcg;
7057 }
7058
7059 /**
7060  * mem_cgroup_swapout - transfer a memsw charge to swap
7061  * @folio: folio whose memsw charge to transfer
7062  * @entry: swap entry to move the charge to
7063  *
7064  * Transfer the memsw charge of @folio to @entry.
7065  */
7066 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7067 {
7068         struct mem_cgroup *memcg, *swap_memcg;
7069         unsigned int nr_entries;
7070         unsigned short oldid;
7071
7072         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7073         VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7074
7075         if (mem_cgroup_disabled())
7076                 return;
7077
7078         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7079                 return;
7080
7081         memcg = folio_memcg(folio);
7082
7083         VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7084         if (!memcg)
7085                 return;
7086
7087         /*
7088          * In case the memcg owning these pages has been offlined and doesn't
7089          * have an ID allocated to it anymore, charge the closest online
7090          * ancestor for the swap instead and transfer the memory+swap charge.
7091          */
7092         swap_memcg = mem_cgroup_id_get_online(memcg);
7093         nr_entries = folio_nr_pages(folio);
7094         /* Get references for the tail pages, too */
7095         if (nr_entries > 1)
7096                 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7097         oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7098                                    nr_entries);
7099         VM_BUG_ON_FOLIO(oldid, folio);
7100         mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7101
7102         folio->memcg_data = 0;
7103
7104         if (!mem_cgroup_is_root(memcg))
7105                 page_counter_uncharge(&memcg->memory, nr_entries);
7106
7107         if (!cgroup_memory_noswap && memcg != swap_memcg) {
7108                 if (!mem_cgroup_is_root(swap_memcg))
7109                         page_counter_charge(&swap_memcg->memsw, nr_entries);
7110                 page_counter_uncharge(&memcg->memsw, nr_entries);
7111         }
7112
7113         /*
7114          * Interrupts should be disabled here because the caller holds the
7115          * i_pages lock, which is taken with interrupts off. Keeping
7116          * interrupts disabled matters because it is the only
7117          * synchronisation we have for updating the per-CPU variables.
7118          */
7119         memcg_stats_lock();
7120         mem_cgroup_charge_statistics(memcg, -nr_entries);
7121         memcg_stats_unlock();
7122         memcg_check_events(memcg, folio_nid(folio));
7123
7124         css_put(&memcg->css);
7125 }
7126
7127 /**
7128  * __mem_cgroup_try_charge_swap - try charging swap space for a page
7129  * @page: page being added to swap
7130  * @entry: swap entry to charge
7131  *
7132  * Try to charge @page's memcg for the swap space at @entry.
7133  *
7134  * Returns 0 on success, -ENOMEM on failure.
7135  */
7136 int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7137 {
7138         unsigned int nr_pages = thp_nr_pages(page);
7139         struct page_counter *counter;
7140         struct mem_cgroup *memcg;
7141         unsigned short oldid;
7142
7143         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7144                 return 0;
7145
7146         memcg = page_memcg(page);
7147
7148         VM_WARN_ON_ONCE_PAGE(!memcg, page);
7149         if (!memcg)
7150                 return 0;
7151
7152         if (!entry.val) {
7153                 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7154                 return 0;
7155         }
7156
7157         memcg = mem_cgroup_id_get_online(memcg);
7158
7159         if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
7160             !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7161                 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7162                 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7163                 mem_cgroup_id_put(memcg);
7164                 return -ENOMEM;
7165         }
7166
7167         /* Get references for the tail pages, too */
7168         if (nr_pages > 1)
7169                 mem_cgroup_id_get_many(memcg, nr_pages - 1);
7170         oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7171         VM_BUG_ON_PAGE(oldid, page);
7172         mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7173
7174         return 0;
7175 }
7176
7177 /**
7178  * __mem_cgroup_uncharge_swap - uncharge swap space
7179  * @entry: swap entry to uncharge
7180  * @nr_pages: the amount of swap space to uncharge
7181  */
7182 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7183 {
7184         struct mem_cgroup *memcg;
7185         unsigned short id;
7186
7187         id = swap_cgroup_record(entry, 0, nr_pages);
7188         rcu_read_lock();
7189         memcg = mem_cgroup_from_id(id);
7190         if (memcg) {
7191                 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
7192                         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7193                                 page_counter_uncharge(&memcg->swap, nr_pages);
7194                         else
7195                                 page_counter_uncharge(&memcg->memsw, nr_pages);
7196                 }
7197                 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7198                 mem_cgroup_id_put_many(memcg, nr_pages);
7199         }
7200         rcu_read_unlock();
7201 }
7202
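/*
 * Return how many swap pages @memcg may still use: the global free
 * swap, clamped (when cgroup2 swap accounting is in use) by the
 * remaining swap.max headroom of each ancestor up to the root.
 */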
7203 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7204 {
7205         long nr_swap_pages = get_nr_swap_pages();
7206
7207         if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7208                 return nr_swap_pages;
7209         for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7210                 nr_swap_pages = min_t(long, nr_swap_pages,
7211                                       READ_ONCE(memcg->swap.max) -
7212                                       page_counter_read(&memcg->swap));
7213         return nr_swap_pages;
7214 }
7215
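/*
 * Swap is considered "full" for @page if vm_swap_full() reports that
 * system swap is running low or, with cgroup2 swap accounting, if any
 * memcg in the page's hierarchy has used at least half of its
 * swap.high or swap.max.
 */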
7216 bool mem_cgroup_swap_full(struct page *page)
7217 {
7218         struct mem_cgroup *memcg;
7219
7220         VM_BUG_ON_PAGE(!PageLocked(page), page);
7221
7222         if (vm_swap_full())
7223                 return true;
7224         if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7225                 return false;
7226
7227         memcg = page_memcg(page);
7228         if (!memcg)
7229                 return false;
7230
7231         for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
7232                 unsigned long usage = page_counter_read(&memcg->swap);
7233
7234                 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7235                     usage * 2 >= READ_ONCE(memcg->swap.max))
7236                         return true;
7237         }
7238
7239         return false;
7240 }
7241
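/*
 * "swapaccount=1" on the kernel command line enables swap accounting
 * (clears cgroup_memory_noswap); "swapaccount=0" disables it.
 */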
7242 static int __init setup_swap_account(char *s)
7243 {
7244         if (!strcmp(s, "1"))
7245                 cgroup_memory_noswap = false;
7246         else if (!strcmp(s, "0"))
7247                 cgroup_memory_noswap = true;
7248         return 1;
7249 }
7250 __setup("swapaccount=", setup_swap_account);
7251
7252 static u64 swap_current_read(struct cgroup_subsys_state *css,
7253                              struct cftype *cft)
7254 {
7255         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7256
7257         return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7258 }
7259
7260 static int swap_high_show(struct seq_file *m, void *v)
7261 {
7262         return seq_puts_memcg_tunable(m,
7263                 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7264 }
7265
7266 static ssize_t swap_high_write(struct kernfs_open_file *of,
7267                                char *buf, size_t nbytes, loff_t off)
7268 {
7269         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7270         unsigned long high;
7271         int err;
7272
7273         buf = strstrip(buf);
7274         err = page_counter_memparse(buf, "max", &high);
7275         if (err)
7276                 return err;
7277
7278         page_counter_set_high(&memcg->swap, high);
7279
7280         return nbytes;
7281 }
7282
7283 static int swap_max_show(struct seq_file *m, void *v)
7284 {
7285         return seq_puts_memcg_tunable(m,
7286                 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7287 }
7288
7289 static ssize_t swap_max_write(struct kernfs_open_file *of,
7290                               char *buf, size_t nbytes, loff_t off)
7291 {
7292         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7293         unsigned long max;
7294         int err;
7295
7296         buf = strstrip(buf);
7297         err = page_counter_memparse(buf, "max", &max);
7298         if (err)
7299                 return err;
7300
7301         xchg(&memcg->swap.max, max);
7302
7303         return nbytes;
7304 }
7305
7306 static int swap_events_show(struct seq_file *m, void *v)
7307 {
7308         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7309
7310         seq_printf(m, "high %lu\n",
7311                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7312         seq_printf(m, "max %lu\n",
7313                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7314         seq_printf(m, "fail %lu\n",
7315                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7316
7317         return 0;
7318 }
7319
7320 static struct cftype swap_files[] = {
7321         {
7322                 .name = "swap.current",
7323                 .flags = CFTYPE_NOT_ON_ROOT,
7324                 .read_u64 = swap_current_read,
7325         },
7326         {
7327                 .name = "swap.high",
7328                 .flags = CFTYPE_NOT_ON_ROOT,
7329                 .seq_show = swap_high_show,
7330                 .write = swap_high_write,
7331         },
7332         {
7333                 .name = "swap.max",
7334                 .flags = CFTYPE_NOT_ON_ROOT,
7335                 .seq_show = swap_max_show,
7336                 .write = swap_max_write,
7337         },
7338         {
7339                 .name = "swap.events",
7340                 .flags = CFTYPE_NOT_ON_ROOT,
7341                 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
7342                 .seq_show = swap_events_show,
7343         },
7344         { }     /* terminate */
7345 };
7346
7347 static struct cftype memsw_files[] = {
7348         {
7349                 .name = "memsw.usage_in_bytes",
7350                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7351                 .read_u64 = mem_cgroup_read_u64,
7352         },
7353         {
7354                 .name = "memsw.max_usage_in_bytes",
7355                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7356                 .write = mem_cgroup_reset,
7357                 .read_u64 = mem_cgroup_read_u64,
7358         },
7359         {
7360                 .name = "memsw.limit_in_bytes",
7361                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7362                 .write = mem_cgroup_write,
7363                 .read_u64 = mem_cgroup_read_u64,
7364         },
7365         {
7366                 .name = "memsw.failcnt",
7367                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7368                 .write = mem_cgroup_reset,
7369                 .read_u64 = mem_cgroup_read_u64,
7370         },
7371         { },    /* terminate */
7372 };
7373
7374 /*
7375  * If mem_cgroup_swap_init() were implemented as a subsys_initcall()
7376  * instead of a core_initcall(), cgroup_memory_noswap could still remain
7377  * false even when memcg is disabled via the "cgroup_disable=memory"
7378  * boot parameter. In corner cases this may result in a premature oops
7379  * inside mem_cgroup_get_nr_swap_pages().
7380  */
7381 static int __init mem_cgroup_swap_init(void)
7382 {
7383         /* No memory control -> no swap control */
7384         if (mem_cgroup_disabled())
7385                 cgroup_memory_noswap = true;
7386
7387         if (cgroup_memory_noswap)
7388                 return 0;
7389
7390         WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7391         WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7392
7393         return 0;
7394 }
7395 core_initcall(mem_cgroup_swap_init);
7396
7397 #endif /* CONFIG_MEMCG_SWAP */