1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
3  *
4  * Copyright IBM Corporation, 2007
5  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6  *
7  * Copyright 2007 OpenVZ SWsoft Inc
8  * Author: Pavel Emelianov <xemul@openvz.org>
9  *
10  * Memory thresholds
11  * Copyright (C) 2009 Nokia Corporation
12  * Author: Kirill A. Shutemov
13  *
14  * Kernel Memory Controller
15  * Copyright (C) 2012 Parallels Inc. and Google Inc.
16  * Authors: Glauber Costa and Suleiman Souhlal
17  *
18  * Native page reclaim
19  * Charge lifetime sanitation
20  * Lockless page tracking & accounting
21  * Unified hierarchy configuration model
22  * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23  *
24  * Per memcg lru locking
25  * Copyright (C) 2020 Alibaba, Inc, Alex Shi
26  */
27
28 #include <linux/page_counter.h>
29 #include <linux/memcontrol.h>
30 #include <linux/cgroup.h>
31 #include <linux/pagewalk.h>
32 #include <linux/sched/mm.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/hugetlb.h>
35 #include <linux/pagemap.h>
36 #include <linux/vm_event_item.h>
37 #include <linux/smp.h>
38 #include <linux/page-flags.h>
39 #include <linux/backing-dev.h>
40 #include <linux/bit_spinlock.h>
41 #include <linux/rcupdate.h>
42 #include <linux/limits.h>
43 #include <linux/export.h>
44 #include <linux/mutex.h>
45 #include <linux/rbtree.h>
46 #include <linux/slab.h>
47 #include <linux/swap.h>
48 #include <linux/swapops.h>
49 #include <linux/spinlock.h>
50 #include <linux/eventfd.h>
51 #include <linux/poll.h>
52 #include <linux/sort.h>
53 #include <linux/fs.h>
54 #include <linux/seq_file.h>
55 #include <linux/vmpressure.h>
56 #include <linux/memremap.h>
57 #include <linux/mm_inline.h>
58 #include <linux/swap_cgroup.h>
59 #include <linux/cpu.h>
60 #include <linux/oom.h>
61 #include <linux/lockdep.h>
62 #include <linux/file.h>
63 #include <linux/resume_user_mode.h>
64 #include <linux/psi.h>
65 #include <linux/seq_buf.h>
66 #include "internal.h"
67 #include <net/sock.h>
68 #include <net/ip.h>
69 #include "slab.h"
70
71 #include <linux/uaccess.h>
72
73 #include <trace/events/vmscan.h>
74
75 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
76 EXPORT_SYMBOL(memory_cgrp_subsys);
77
78 struct mem_cgroup *root_mem_cgroup __read_mostly;
79
80 /* Active memory cgroup to use from an interrupt context */
81 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
82 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
83
84 /* Socket memory accounting disabled? */
85 static bool cgroup_memory_nosocket __ro_after_init;
86
87 /* Kernel memory accounting disabled? */
88 static bool cgroup_memory_nokmem __ro_after_init;
89
90 /* Whether the swap controller is active */
91 #ifdef CONFIG_MEMCG_SWAP
92 bool cgroup_memory_noswap __ro_after_init;
93 #else
94 #define cgroup_memory_noswap            1
95 #endif
96
97 #ifdef CONFIG_CGROUP_WRITEBACK
98 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
99 #endif
100
101 /* Whether legacy memory+swap accounting is active */
102 static bool do_memsw_account(void)
103 {
104         return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
105 }
106
107 #define THRESHOLDS_EVENTS_TARGET 128
108 #define SOFTLIMIT_EVENTS_TARGET 1024
109
110 /*
111  * Cgroups above their limits are maintained in an RB-tree, independent of
112  * their hierarchy representation.
113  */
114
115 struct mem_cgroup_tree_per_node {
116         struct rb_root rb_root;
117         struct rb_node *rb_rightmost;
118         spinlock_t lock;
119 };
120
121 struct mem_cgroup_tree {
122         struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
123 };
124
125 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
126
127 /* for OOM */
128 struct mem_cgroup_eventfd_list {
129         struct list_head list;
130         struct eventfd_ctx *eventfd;
131 };
132
133 /*
134  * cgroup_event represents events which userspace wants to receive.
135  */
136 struct mem_cgroup_event {
137         /*
138          * memcg which the event belongs to.
139          */
140         struct mem_cgroup *memcg;
141         /*
142          * eventfd to signal userspace about the event.
143          */
144         struct eventfd_ctx *eventfd;
145         /*
146          * Each of these is stored in a list by the cgroup.
147          */
148         struct list_head list;
149         /*
150          * register_event() callback will be used to add a new userspace
151          * waiter for changes related to this event.  Use eventfd_signal()
152          * on the eventfd to send a notification to userspace.
153          */
154         int (*register_event)(struct mem_cgroup *memcg,
155                               struct eventfd_ctx *eventfd, const char *args);
156         /*
157          * unregister_event() callback will be called when userspace closes
158          * the eventfd or when the cgroup is removed.  This callback must be
159          * set if you want to provide notification functionality.
160          */
161         void (*unregister_event)(struct mem_cgroup *memcg,
162                                  struct eventfd_ctx *eventfd);
163         /*
164          * All fields below are needed to unregister the event when
165          * userspace closes the eventfd.
166          */
167         poll_table pt;
168         wait_queue_head_t *wqh;
169         wait_queue_entry_t wait;
170         struct work_struct remove;
171 };
172
173 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
174 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
175
176 /* Stuff for moving charges at task migration. */
177 /*
178  * Types of charges to be moved.
179  */
180 #define MOVE_ANON       0x1U
181 #define MOVE_FILE       0x2U
182 #define MOVE_MASK       (MOVE_ANON | MOVE_FILE)
183
184 /* "mc" and its members are protected by cgroup_mutex */
185 static struct move_charge_struct {
186         spinlock_t        lock; /* for from, to */
187         struct mm_struct  *mm;
188         struct mem_cgroup *from;
189         struct mem_cgroup *to;
190         unsigned long flags;
191         unsigned long precharge;
192         unsigned long moved_charge;
193         unsigned long moved_swap;
194         struct task_struct *moving_task;        /* a task moving charges */
195         wait_queue_head_t waitq;                /* a waitq for other context */
196 } mc = {
197         .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
198         .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
199 };
200
201 /*
202  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
203  * limit reclaim to prevent infinite loops, if they ever occur.
204  */
205 #define MEM_CGROUP_MAX_RECLAIM_LOOPS            100
206 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
207
208 /* for encoding cft->private value on file */
209 enum res_type {
210         _MEM,
211         _MEMSWAP,
212         _OOM_TYPE,
213         _KMEM,
214         _TCP,
215 };
216
217 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
218 #define MEMFILE_TYPE(val)       ((val) >> 16 & 0xffff)
219 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
220 /* Used for OOM notifier */
221 #define OOM_CONTROL             (0)
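
/*
 * Illustrative sketch (not part of the original file): how a cftype's
 * ->private value round-trips through the MEMFILE_* helpers above.  The
 * high 16 bits carry the res_type, the low 16 bits the attribute.
 */
static inline void memfile_encoding_example(void)
{
	int val = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL);

	WARN_ON(MEMFILE_TYPE(val) != _OOM_TYPE);	/* high 16 bits */
	WARN_ON(MEMFILE_ATTR(val) != OOM_CONTROL);	/* low 16 bits */
}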
222
223 /*
224  * Iteration constructs for visiting all cgroups (under a tree).  If
225  * loops are exited prematurely (break), mem_cgroup_iter_break() must
226  * be used for reference counting.
227  */
228 #define for_each_mem_cgroup_tree(iter, root)            \
229         for (iter = mem_cgroup_iter(root, NULL, NULL);  \
230              iter != NULL;                              \
231              iter = mem_cgroup_iter(root, iter, NULL))
232
233 #define for_each_mem_cgroup(iter)                       \
234         for (iter = mem_cgroup_iter(NULL, NULL, NULL);  \
235              iter != NULL;                              \
236              iter = mem_cgroup_iter(NULL, iter, NULL))
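
/*
 * Usage sketch (illustrative, not part of the original file): walking a
 * subtree with an early exit.  Because the iterator holds css references,
 * a premature break must go through mem_cgroup_iter_break().  Here
 * some_condition() stands in for a hypothetical stop predicate:
 *
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree(iter, root) {
 *		if (some_condition(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */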
237
238 static inline bool task_is_dying(void)
239 {
240         return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
241                 (current->flags & PF_EXITING);
242 }
243
244 /* Some nice accessors for the vmpressure. */
245 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
246 {
247         if (!memcg)
248                 memcg = root_mem_cgroup;
249         return &memcg->vmpressure;
250 }
251
252 struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr)
253 {
254         return container_of(vmpr, struct mem_cgroup, vmpressure);
255 }
256
257 #ifdef CONFIG_MEMCG_KMEM
258 static DEFINE_SPINLOCK(objcg_lock);
259
260 bool mem_cgroup_kmem_disabled(void)
261 {
262         return cgroup_memory_nokmem;
263 }
264
265 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
266                                       unsigned int nr_pages);
267
268 static void obj_cgroup_release(struct percpu_ref *ref)
269 {
270         struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
271         unsigned int nr_bytes;
272         unsigned int nr_pages;
273         unsigned long flags;
274
275         /*
276          * At this point all allocated objects are freed, and
277          * objcg->nr_charged_bytes can't have an arbitrary byte value.
278          * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
279          *
280          * The following sequence can lead to it:
281          * 1) CPU0: objcg == stock->cached_objcg
282          * 2) CPU1: we do a small allocation (e.g. 92 bytes),
283          *          PAGE_SIZE bytes are charged
284          * 3) CPU1: a process from another memcg is allocating something,
285          *          the stock is flushed,
286          *          objcg->nr_charged_bytes = PAGE_SIZE - 92
287          * 4) CPU0: we release this object,
288          *          92 bytes are added to stock->nr_bytes
289          * 5) CPU0: stock is flushed,
290          *          92 bytes are added to objcg->nr_charged_bytes
291          *
292          * As a result, nr_charged_bytes == PAGE_SIZE.
293          * This page will be uncharged in obj_cgroup_release().
294          */
295         nr_bytes = atomic_read(&objcg->nr_charged_bytes);
296         WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
297         nr_pages = nr_bytes >> PAGE_SHIFT;
298
299         if (nr_pages)
300                 obj_cgroup_uncharge_pages(objcg, nr_pages);
301
302         spin_lock_irqsave(&objcg_lock, flags);
303         list_del(&objcg->list);
304         spin_unlock_irqrestore(&objcg_lock, flags);
305
306         percpu_ref_exit(ref);
307         kfree_rcu(objcg, rcu);
308 }
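
/*
 * Worked example of the byte accounting described above (illustrative):
 * a 92-byte object is charged by rounding up to a whole page, leaving
 * PAGE_SIZE - 92 bytes in objcg->nr_charged_bytes once the stock is
 * flushed.  When the object is later freed on another CPU and that
 * stock is flushed too, the 92 bytes come back, so in the release path:
 *
 *	nr_bytes = (PAGE_SIZE - 92) + 92 == PAGE_SIZE
 *	nr_pages = nr_bytes >> PAGE_SHIFT == 1
 *
 * and exactly one page is uncharged by obj_cgroup_uncharge_pages().
 */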
309
310 static struct obj_cgroup *obj_cgroup_alloc(void)
311 {
312         struct obj_cgroup *objcg;
313         int ret;
314
315         objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
316         if (!objcg)
317                 return NULL;
318
319         ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
320                               GFP_KERNEL);
321         if (ret) {
322                 kfree(objcg);
323                 return NULL;
324         }
325         INIT_LIST_HEAD(&objcg->list);
326         return objcg;
327 }
328
329 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
330                                   struct mem_cgroup *parent)
331 {
332         struct obj_cgroup *objcg, *iter;
333
334         objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
335
336         spin_lock_irq(&objcg_lock);
337
338         /* 1) Ready to reparent active objcg. */
339         list_add(&objcg->list, &memcg->objcg_list);
340         /* 2) Reparent active objcg and already reparented objcgs to parent. */
341         list_for_each_entry(iter, &memcg->objcg_list, list)
342                 WRITE_ONCE(iter->memcg, parent);
343         /* 3) Move already reparented objcgs to the parent's list */
344         list_splice(&memcg->objcg_list, &parent->objcg_list);
345
346         spin_unlock_irq(&objcg_lock);
347
348         percpu_ref_kill(&objcg->refcnt);
349 }
350
351 /*
352  * A lot of the calls to the cache allocation functions are expected to be
353  * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
354  * conditional on this static branch, we have to allow modules that do
355  * kmem_cache_alloc and the like to see this symbol as well.
356  */
357 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
358 EXPORT_SYMBOL(memcg_kmem_enabled_key);
359 #endif
360
361 /**
362  * mem_cgroup_css_from_page - css of the memcg associated with a page
363  * @page: page of interest
364  *
365  * If memcg is bound to the default hierarchy, css of the memcg associated
366  * with @page is returned.  The returned css remains associated with @page
367  * until it is released.
368  *
369  * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
370  * is returned.
371  */
372 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
373 {
374         struct mem_cgroup *memcg;
375
376         memcg = page_memcg(page);
377
378         if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
379                 memcg = root_mem_cgroup;
380
381         return &memcg->css;
382 }
383
384 /**
385  * page_cgroup_ino - return inode number of the memcg a page is charged to
386  * @page: the page
387  *
388  * Look up the closest online ancestor of the memory cgroup @page is charged to
389  * and return its inode number or 0 if @page is not charged to any cgroup. It
390  * is safe to call this function without holding a reference to @page.
391  *
392  * Note, this function is inherently racy, because there is nothing to prevent
393  * the cgroup inode from getting torn down and potentially reallocated a moment
394  * after page_cgroup_ino() returns, so it should only be used by callers that
395  * do not care (such as procfs interfaces).
396  */
397 ino_t page_cgroup_ino(struct page *page)
398 {
399         struct mem_cgroup *memcg;
400         unsigned long ino = 0;
401
402         rcu_read_lock();
403         memcg = page_memcg_check(page);
404
405         while (memcg && !(memcg->css.flags & CSS_ONLINE))
406                 memcg = parent_mem_cgroup(memcg);
407         if (memcg)
408                 ino = cgroup_ino(memcg->css.cgroup);
409         rcu_read_unlock();
410         return ino;
411 }
412
413 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
414                                          struct mem_cgroup_tree_per_node *mctz,
415                                          unsigned long new_usage_in_excess)
416 {
417         struct rb_node **p = &mctz->rb_root.rb_node;
418         struct rb_node *parent = NULL;
419         struct mem_cgroup_per_node *mz_node;
420         bool rightmost = true;
421
422         if (mz->on_tree)
423                 return;
424
425         mz->usage_in_excess = new_usage_in_excess;
426         if (!mz->usage_in_excess)
427                 return;
428         while (*p) {
429                 parent = *p;
430                 mz_node = rb_entry(parent, struct mem_cgroup_per_node,
431                                         tree_node);
432                 if (mz->usage_in_excess < mz_node->usage_in_excess) {
433                         p = &(*p)->rb_left;
434                         rightmost = false;
435                 } else {
436                         p = &(*p)->rb_right;
437                 }
438         }
439
440         if (rightmost)
441                 mctz->rb_rightmost = &mz->tree_node;
442
443         rb_link_node(&mz->tree_node, parent, p);
444         rb_insert_color(&mz->tree_node, &mctz->rb_root);
445         mz->on_tree = true;
446 }
447
448 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
449                                          struct mem_cgroup_tree_per_node *mctz)
450 {
451         if (!mz->on_tree)
452                 return;
453
454         if (&mz->tree_node == mctz->rb_rightmost)
455                 mctz->rb_rightmost = rb_prev(&mz->tree_node);
456
457         rb_erase(&mz->tree_node, &mctz->rb_root);
458         mz->on_tree = false;
459 }
460
461 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
462                                        struct mem_cgroup_tree_per_node *mctz)
463 {
464         unsigned long flags;
465
466         spin_lock_irqsave(&mctz->lock, flags);
467         __mem_cgroup_remove_exceeded(mz, mctz);
468         spin_unlock_irqrestore(&mctz->lock, flags);
469 }
470
471 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
472 {
473         unsigned long nr_pages = page_counter_read(&memcg->memory);
474         unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
475         unsigned long excess = 0;
476
477         if (nr_pages > soft_limit)
478                 excess = nr_pages - soft_limit;
479
480         return excess;
481 }
482
483 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid)
484 {
485         unsigned long excess;
486         struct mem_cgroup_per_node *mz;
487         struct mem_cgroup_tree_per_node *mctz;
488
489         mctz = soft_limit_tree.rb_tree_per_node[nid];
490         if (!mctz)
491                 return;
492         /*
493          * Necessary to update all ancestors when hierarchy is used,
494          * because their event counters are not touched.
495          */
496         for (; memcg; memcg = parent_mem_cgroup(memcg)) {
497                 mz = memcg->nodeinfo[nid];
498                 excess = soft_limit_excess(memcg);
499                 /*
500                  * We have to update the tree if mz is on the RB-tree or
501                  * memcg is over its soft limit.
502                  */
503                 if (excess || mz->on_tree) {
504                         unsigned long flags;
505
506                         spin_lock_irqsave(&mctz->lock, flags);
507                         /* if on-tree, remove it */
508                         if (mz->on_tree)
509                                 __mem_cgroup_remove_exceeded(mz, mctz);
510                         /*
511                          * Insert again. mz->usage_in_excess will be updated.
512                          * If excess is 0, no tree ops.
513                          */
514                         __mem_cgroup_insert_exceeded(mz, mctz, excess);
515                         spin_unlock_irqrestore(&mctz->lock, flags);
516                 }
517         }
518 }
519
520 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
521 {
522         struct mem_cgroup_tree_per_node *mctz;
523         struct mem_cgroup_per_node *mz;
524         int nid;
525
526         for_each_node(nid) {
527                 mz = memcg->nodeinfo[nid];
528                 mctz = soft_limit_tree.rb_tree_per_node[nid];
529                 if (mctz)
530                         mem_cgroup_remove_exceeded(mz, mctz);
531         }
532 }
533
534 static struct mem_cgroup_per_node *
535 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
536 {
537         struct mem_cgroup_per_node *mz;
538
539 retry:
540         mz = NULL;
541         if (!mctz->rb_rightmost)
542                 goto done;              /* Nothing to reclaim from */
543
544         mz = rb_entry(mctz->rb_rightmost,
545                       struct mem_cgroup_per_node, tree_node);
546         /*
547          * Remove the node now but someone else can add it back;
548          * we will add it back at the end of reclaim to its correct
549          * position in the tree.
550          */
551         __mem_cgroup_remove_exceeded(mz, mctz);
552         if (!soft_limit_excess(mz->memcg) ||
553             !css_tryget(&mz->memcg->css))
554                 goto retry;
555 done:
556         return mz;
557 }
558
559 static struct mem_cgroup_per_node *
560 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
561 {
562         struct mem_cgroup_per_node *mz;
563
564         spin_lock_irq(&mctz->lock);
565         mz = __mem_cgroup_largest_soft_limit_node(mctz);
566         spin_unlock_irq(&mctz->lock);
567         return mz;
568 }
569
570 /*
571  * memcg and lruvec stats flushing
572  *
573  * Many codepaths leading to stats update or read are performance sensitive and
574  * adding stats flushing in such codepaths is not desirable. So, to optimize the
575  * flushing the kernel does:
576  *
577  * 1) Periodically and asynchronously flush the stats every 2 seconds so that
578  *    the rstat update tree doesn't grow unbounded.
579  *
580  * 2) Flush the stats synchronously on the reader side only when there are
581  *    more than (MEMCG_CHARGE_BATCH * nr_cpus) update events. This lets the
582  *    stats be out of sync by at most (MEMCG_CHARGE_BATCH * nr_cpus) updates,
583  *    but only for 2 seconds due to (1).
584  */
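
/*
 * Worked example (illustrative, assuming MEMCG_CHARGE_BATCH is 32): each
 * CPU adds roughly 1 to stats_flush_threshold per 32 local updates, and
 * mem_cgroup_flush_stats() flushes once the threshold exceeds
 * num_online_cpus().  With 8 CPUs online, a synchronous flush therefore
 * happens after roughly 32 * 8 == 256 pending updates system-wide.
 */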
585 static void flush_memcg_stats_dwork(struct work_struct *w);
586 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
587 static DEFINE_SPINLOCK(stats_flush_lock);
588 static DEFINE_PER_CPU(unsigned int, stats_updates);
589 static atomic_t stats_flush_threshold = ATOMIC_INIT(0);
590
591 /*
592  * Accessors to ensure that preemption is disabled on PREEMPT_RT, because
593  * acquiring a spinlock_t does not imply disabled preemption there. These
594  * functions are never used in hardirq context on PREEMPT_RT, so disabling
595  * preemption is sufficient.
596  */
597 static void memcg_stats_lock(void)
598 {
599 #ifdef CONFIG_PREEMPT_RT
600       preempt_disable();
601 #else
602       VM_BUG_ON(!irqs_disabled());
603 #endif
604 }
605
606 static void __memcg_stats_lock(void)
607 {
608 #ifdef CONFIG_PREEMPT_RT
609       preempt_disable();
610 #endif
611 }
612
613 static void memcg_stats_unlock(void)
614 {
615 #ifdef CONFIG_PREEMPT_RT
616       preempt_enable();
617 #endif
618 }
619
620 static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
621 {
622         unsigned int x;
623
624         cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
625
626         x = __this_cpu_add_return(stats_updates, abs(val));
627         if (x > MEMCG_CHARGE_BATCH) {
628                 atomic_add(x / MEMCG_CHARGE_BATCH, &stats_flush_threshold);
629                 __this_cpu_write(stats_updates, 0);
630         }
631 }
632
633 static void __mem_cgroup_flush_stats(void)
634 {
635         unsigned long flag;
636
637         if (!spin_trylock_irqsave(&stats_flush_lock, flag))
638                 return;
639
640         cgroup_rstat_flush_irqsafe(root_mem_cgroup->css.cgroup);
641         atomic_set(&stats_flush_threshold, 0);
642         spin_unlock_irqrestore(&stats_flush_lock, flag);
643 }
644
645 void mem_cgroup_flush_stats(void)
646 {
647         if (atomic_read(&stats_flush_threshold) > num_online_cpus())
648                 __mem_cgroup_flush_stats();
649 }
650
651 static void flush_memcg_stats_dwork(struct work_struct *w)
652 {
653         __mem_cgroup_flush_stats();
654         queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
655 }
656
657 /**
658  * __mod_memcg_state - update cgroup memory statistics
659  * @memcg: the memory cgroup
660  * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
661  * @val: delta to add to the counter, can be negative
662  */
663 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
664 {
665         if (mem_cgroup_disabled())
666                 return;
667
668         __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
669         memcg_rstat_updated(memcg, val);
670 }
671
672 /* idx can be of type enum memcg_stat_item or node_stat_item. */
673 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
674 {
675         long x = 0;
676         int cpu;
677
678         for_each_possible_cpu(cpu)
679                 x += per_cpu(memcg->vmstats_percpu->state[idx], cpu);
680 #ifdef CONFIG_SMP
681         if (x < 0)
682                 x = 0;
683 #endif
684         return x;
685 }
686
687 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
688                               int val)
689 {
690         struct mem_cgroup_per_node *pn;
691         struct mem_cgroup *memcg;
692
693         pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
694         memcg = pn->memcg;
695
696         /*
697          * Callers from rmap rely on disabled preemption because they never
698          * update their counters from interrupt context. For those counters
699          * we check that the update is never performed from an interrupt
700          * context, while the other callers need to have interrupts disabled.
701          */
702         __memcg_stats_lock();
703         if (IS_ENABLED(CONFIG_DEBUG_VM) && !IS_ENABLED(CONFIG_PREEMPT_RT)) {
704                 switch (idx) {
705                 case NR_ANON_MAPPED:
706                 case NR_FILE_MAPPED:
707                 case NR_ANON_THPS:
708                 case NR_SHMEM_PMDMAPPED:
709                 case NR_FILE_PMDMAPPED:
710                         WARN_ON_ONCE(!in_task());
711                         break;
712                 default:
713                         WARN_ON_ONCE(!irqs_disabled());
714                 }
715         }
716
717         /* Update memcg */
718         __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
719
720         /* Update lruvec */
721         __this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
722
723         memcg_rstat_updated(memcg, val);
724         memcg_stats_unlock();
725 }
726
727 /**
728  * __mod_lruvec_state - update lruvec memory statistics
729  * @lruvec: the lruvec
730  * @idx: the stat item
731  * @val: delta to add to the counter, can be negative
732  *
733  * The lruvec is the intersection of the NUMA node and a cgroup. This
734  * function updates all three counters that are affected by a
735  * change of state at this level: per-node, per-cgroup, per-lruvec.
736  */
737 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
738                         int val)
739 {
740         /* Update node */
741         __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
742
743         /* Update memcg and lruvec */
744         if (!mem_cgroup_disabled())
745                 __mod_memcg_lruvec_state(lruvec, idx, val);
746 }
747
748 void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
749                              int val)
750 {
751         struct page *head = compound_head(page); /* rmap on tail pages */
752         struct mem_cgroup *memcg;
753         pg_data_t *pgdat = page_pgdat(page);
754         struct lruvec *lruvec;
755
756         rcu_read_lock();
757         memcg = page_memcg(head);
758         /* Untracked pages have no memcg, no lruvec. Update only the node */
759         if (!memcg) {
760                 rcu_read_unlock();
761                 __mod_node_page_state(pgdat, idx, val);
762                 return;
763         }
764
765         lruvec = mem_cgroup_lruvec(memcg, pgdat);
766         __mod_lruvec_state(lruvec, idx, val);
767         rcu_read_unlock();
768 }
769 EXPORT_SYMBOL(__mod_lruvec_page_state);
770
771 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
772 {
773         pg_data_t *pgdat = page_pgdat(virt_to_page(p));
774         struct mem_cgroup *memcg;
775         struct lruvec *lruvec;
776
777         rcu_read_lock();
778         memcg = mem_cgroup_from_obj(p);
779
780         /*
781          * Untracked pages have no memcg, no lruvec. Update only the
782          * node. If we reparent the slab objects to the root memcg,
783          * when we free the slab object, we need to update the per-memcg
784          * vmstats to keep it correct for the root memcg.
785          */
786         if (!memcg) {
787                 __mod_node_page_state(pgdat, idx, val);
788         } else {
789                 lruvec = mem_cgroup_lruvec(memcg, pgdat);
790                 __mod_lruvec_state(lruvec, idx, val);
791         }
792         rcu_read_unlock();
793 }
794
795 /**
796  * __count_memcg_events - account VM events in a cgroup
797  * @memcg: the memory cgroup
798  * @idx: the event item
799  * @count: the number of events that occurred
800  */
801 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
802                           unsigned long count)
803 {
804         if (mem_cgroup_disabled())
805                 return;
806
807         memcg_stats_lock();
808         __this_cpu_add(memcg->vmstats_percpu->events[idx], count);
809         memcg_rstat_updated(memcg, count);
810         memcg_stats_unlock();
811 }
812
813 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
814 {
815         return READ_ONCE(memcg->vmstats.events[event]);
816 }
817
818 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
819 {
820         long x = 0;
821         int cpu;
822
823         for_each_possible_cpu(cpu)
824                 x += per_cpu(memcg->vmstats_percpu->events[event], cpu);
825         return x;
826 }
827
828 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
829                                          int nr_pages)
830 {
831         /* pagein of a big page is a single event, so ignore the page size */
832         if (nr_pages > 0)
833                 __count_memcg_events(memcg, PGPGIN, 1);
834         else {
835                 __count_memcg_events(memcg, PGPGOUT, 1);
836                 nr_pages = -nr_pages; /* for event */
837         }
838
839         __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
840 }
841
842 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
843                                        enum mem_cgroup_events_target target)
844 {
845         unsigned long val, next;
846
847         val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
848         next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
849         /* from time_after() in jiffies.h */
850         if ((long)(next - val) < 0) {
851                 switch (target) {
852                 case MEM_CGROUP_TARGET_THRESH:
853                         next = val + THRESHOLDS_EVENTS_TARGET;
854                         break;
855                 case MEM_CGROUP_TARGET_SOFTLIMIT:
856                         next = val + SOFTLIMIT_EVENTS_TARGET;
857                         break;
858                 default:
859                         break;
860                 }
861                 __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
862                 return true;
863         }
864         return false;
865 }
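
/*
 * Note on the comparison above (illustrative): "(long)(next - val) < 0"
 * is the same wraparound-safe idiom as time_after().  If next was set to
 * val + THRESHOLDS_EVENTS_TARGET just before val wraps past ULONG_MAX,
 * next wraps as well, but next - val is still THRESHOLDS_EVENTS_TARGET
 * in unsigned arithmetic, so the signed test keeps working across the
 * wrap.
 */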
866
867 /*
868  * Check events in order.
869  *
870  */
871 static void memcg_check_events(struct mem_cgroup *memcg, int nid)
872 {
873         if (IS_ENABLED(CONFIG_PREEMPT_RT))
874                 return;
875
876         /* threshold event is triggered in finer grain than soft limit */
877         if (unlikely(mem_cgroup_event_ratelimit(memcg,
878                                                 MEM_CGROUP_TARGET_THRESH))) {
879                 bool do_softlimit;
880
881                 do_softlimit = mem_cgroup_event_ratelimit(memcg,
882                                                 MEM_CGROUP_TARGET_SOFTLIMIT);
883                 mem_cgroup_threshold(memcg);
884                 if (unlikely(do_softlimit))
885                         mem_cgroup_update_tree(memcg, nid);
886         }
887 }
888
889 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
890 {
891         /*
892          * mm_update_next_owner() may clear mm->owner to NULL
893          * if it races with swapoff, page migration, etc.
894          * So this can be called with p == NULL.
895          */
896         if (unlikely(!p))
897                 return NULL;
898
899         return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
900 }
901 EXPORT_SYMBOL(mem_cgroup_from_task);
902
903 static __always_inline struct mem_cgroup *active_memcg(void)
904 {
905         if (!in_task())
906                 return this_cpu_read(int_active_memcg);
907         else
908                 return current->active_memcg;
909 }
910
911 /**
912  * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
913  * @mm: mm from which memcg should be extracted. It can be NULL.
914  *
915  * Obtain a reference on mm's memcg and return it if successful. If mm
916  * is NULL, then the memcg is chosen as follows:
917  * 1) The active memcg, if set.
918  * 2) current->mm->memcg, if available
919  * 3) root memcg
920  * If mem_cgroup is disabled, NULL is returned.
921  */
922 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
923 {
924         struct mem_cgroup *memcg;
925
926         if (mem_cgroup_disabled())
927                 return NULL;
928
929         /*
930          * Page cache insertions can happen without an
931          * actual mm context, e.g. during disk probing
932          * on boot, loopback IO, acct() writes etc.
933          *
934          * No need to css_get on root memcg as the reference
935          * counting is disabled on the root level in the
936          * cgroup core. See CSS_NO_REF.
937          */
938         if (unlikely(!mm)) {
939                 memcg = active_memcg();
940                 if (unlikely(memcg)) {
941                         /* remote memcg must hold a ref */
942                         css_get(&memcg->css);
943                         return memcg;
944                 }
945                 mm = current->mm;
946                 if (unlikely(!mm))
947                         return root_mem_cgroup;
948         }
949
950         rcu_read_lock();
951         do {
952                 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
953                 if (unlikely(!memcg))
954                         memcg = root_mem_cgroup;
955         } while (!css_tryget(&memcg->css));
956         rcu_read_unlock();
957         return memcg;
958 }
959 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
960
961 static __always_inline bool memcg_kmem_bypass(void)
962 {
963         /* Allow remote memcg charging from any context. */
964         if (unlikely(active_memcg()))
965                 return false;
966
967         /* Memcg to charge can't be determined. */
968         if (!in_task() || !current->mm || (current->flags & PF_KTHREAD))
969                 return true;
970
971         return false;
972 }
973
974 /**
975  * mem_cgroup_iter - iterate over memory cgroup hierarchy
976  * @root: hierarchy root
977  * @prev: previously returned memcg, NULL on first invocation
978  * @reclaim: cookie for shared reclaim walks, NULL for full walks
979  *
980  * Returns references to children of the hierarchy below @root, or
981  * @root itself, or %NULL after a full round-trip.
982  *
983  * Caller must pass the return value in @prev on subsequent
984  * invocations for reference counting, or use mem_cgroup_iter_break()
985  * to cancel a hierarchy walk before the round-trip is complete.
986  *
987  * Reclaimers can specify a node in @reclaim to divide up the memcgs
988  * in the hierarchy among all concurrent reclaimers operating on the
989  * same node.
990  */
991 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
992                                    struct mem_cgroup *prev,
993                                    struct mem_cgroup_reclaim_cookie *reclaim)
994 {
995         struct mem_cgroup_reclaim_iter *iter;
996         struct cgroup_subsys_state *css = NULL;
997         struct mem_cgroup *memcg = NULL;
998         struct mem_cgroup *pos = NULL;
999
1000         if (mem_cgroup_disabled())
1001                 return NULL;
1002
1003         if (!root)
1004                 root = root_mem_cgroup;
1005
1006         if (prev && !reclaim)
1007                 pos = prev;
1008
1009         rcu_read_lock();
1010
1011         if (reclaim) {
1012                 struct mem_cgroup_per_node *mz;
1013
1014                 mz = root->nodeinfo[reclaim->pgdat->node_id];
1015                 iter = &mz->iter;
1016
1017                 if (prev && reclaim->generation != iter->generation)
1018                         goto out_unlock;
1019
1020                 while (1) {
1021                         pos = READ_ONCE(iter->position);
1022                         if (!pos || css_tryget(&pos->css))
1023                                 break;
1024                         /*
1025                          * css reference reached zero, so iter->position will
1026                          * be cleared by ->css_released. However, we should not
1027                          * rely on this happening soon, because ->css_released
1028                          * is called from a work queue, and by busy-waiting we
1029                          * might block it. So we clear iter->position right
1030                          * away.
1031                          */
1032                         (void)cmpxchg(&iter->position, pos, NULL);
1033                 }
1034         }
1035
1036         if (pos)
1037                 css = &pos->css;
1038
1039         for (;;) {
1040                 css = css_next_descendant_pre(css, &root->css);
1041                 if (!css) {
1042                         /*
1043                          * Reclaimers share the hierarchy walk, and a
1044                          * new one might jump in right at the end of
1045                          * the hierarchy - make sure they see at least
1046                          * one group and restart from the beginning.
1047                          */
1048                         if (!prev)
1049                                 continue;
1050                         break;
1051                 }
1052
1053                 /*
1054                  * Verify the css and acquire a reference.  The root
1055                  * is provided by the caller, so we know it's alive
1056                  * and kicking, and don't take an extra reference.
1057                  */
1058                 memcg = mem_cgroup_from_css(css);
1059
1060                 if (css == &root->css)
1061                         break;
1062
1063                 if (css_tryget(css))
1064                         break;
1065
1066                 memcg = NULL;
1067         }
1068
1069         if (reclaim) {
1070                 /*
1071                  * The position could have already been updated by a competing
1072                  * thread, so check that the value hasn't changed since we read
1073                  * it to avoid reclaiming from the same cgroup twice.
1074                  */
1075                 (void)cmpxchg(&iter->position, pos, memcg);
1076
1077                 if (pos)
1078                         css_put(&pos->css);
1079
1080                 if (!memcg)
1081                         iter->generation++;
1082                 else if (!prev)
1083                         reclaim->generation = iter->generation;
1084         }
1085
1086 out_unlock:
1087         rcu_read_unlock();
1088         if (prev && prev != root)
1089                 css_put(&prev->css);
1090
1091         return memcg;
1092 }
1093
1094 /**
1095  * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1096  * @root: hierarchy root
1097  * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1098  */
1099 void mem_cgroup_iter_break(struct mem_cgroup *root,
1100                            struct mem_cgroup *prev)
1101 {
1102         if (!root)
1103                 root = root_mem_cgroup;
1104         if (prev && prev != root)
1105                 css_put(&prev->css);
1106 }
1107
1108 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1109                                         struct mem_cgroup *dead_memcg)
1110 {
1111         struct mem_cgroup_reclaim_iter *iter;
1112         struct mem_cgroup_per_node *mz;
1113         int nid;
1114
1115         for_each_node(nid) {
1116                 mz = from->nodeinfo[nid];
1117                 iter = &mz->iter;
1118                 cmpxchg(&iter->position, dead_memcg, NULL);
1119         }
1120 }
1121
1122 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1123 {
1124         struct mem_cgroup *memcg = dead_memcg;
1125         struct mem_cgroup *last;
1126
1127         do {
1128                 __invalidate_reclaim_iterators(memcg, dead_memcg);
1129                 last = memcg;
1130         } while ((memcg = parent_mem_cgroup(memcg)));
1131
1132         /*
1133          * When cgroup1 non-hierarchy mode is used,
1134          * parent_mem_cgroup() does not walk all the way up to the
1135          * cgroup root (root_mem_cgroup). So we have to handle
1136          * dead_memcg from cgroup root separately.
1137          */
1138         if (last != root_mem_cgroup)
1139                 __invalidate_reclaim_iterators(root_mem_cgroup,
1140                                                 dead_memcg);
1141 }
1142
1143 /**
1144  * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1145  * @memcg: hierarchy root
1146  * @fn: function to call for each task
1147  * @arg: argument passed to @fn
1148  *
1149  * This function iterates over tasks attached to @memcg or to any of its
1150  * descendants and calls @fn for each task. If @fn returns a non-zero
1151  * value, the function breaks the iteration loop and returns the value.
1152  * Otherwise, it will iterate over all tasks and return 0.
1153  *
1154  * This function must not be called for the root memory cgroup.
1155  */
1156 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1157                           int (*fn)(struct task_struct *, void *), void *arg)
1158 {
1159         struct mem_cgroup *iter;
1160         int ret = 0;
1161
1162         BUG_ON(memcg == root_mem_cgroup);
1163
1164         for_each_mem_cgroup_tree(iter, memcg) {
1165                 struct css_task_iter it;
1166                 struct task_struct *task;
1167
1168                 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1169                 while (!ret && (task = css_task_iter_next(&it)))
1170                         ret = fn(task, arg);
1171                 css_task_iter_end(&it);
1172                 if (ret) {
1173                         mem_cgroup_iter_break(memcg, iter);
1174                         break;
1175                 }
1176         }
1177         return ret;
1178 }
1179
1180 #ifdef CONFIG_DEBUG_VM
1181 void lruvec_memcg_debug(struct lruvec *lruvec, struct folio *folio)
1182 {
1183         struct mem_cgroup *memcg;
1184
1185         if (mem_cgroup_disabled())
1186                 return;
1187
1188         memcg = folio_memcg(folio);
1189
1190         if (!memcg)
1191                 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != root_mem_cgroup, folio);
1192         else
1193                 VM_BUG_ON_FOLIO(lruvec_memcg(lruvec) != memcg, folio);
1194 }
1195 #endif
1196
1197 /**
1198  * folio_lruvec_lock - Lock the lruvec for a folio.
1199  * @folio: Pointer to the folio.
1200  *
1201  * These functions are safe to use under any of the following conditions:
1202  * - folio locked
1203  * - folio_test_lru false
1204  * - folio_memcg_lock()
1205  * - folio frozen (refcount of 0)
1206  *
1207  * Return: The lruvec this folio is on with its lock held.
1208  */
1209 struct lruvec *folio_lruvec_lock(struct folio *folio)
1210 {
1211         struct lruvec *lruvec = folio_lruvec(folio);
1212
1213         spin_lock(&lruvec->lru_lock);
1214         lruvec_memcg_debug(lruvec, folio);
1215
1216         return lruvec;
1217 }
1218
1219 /**
1220  * folio_lruvec_lock_irq - Lock the lruvec for a folio.
1221  * @folio: Pointer to the folio.
1222  *
1223  * These functions are safe to use under any of the following conditions:
1224  * - folio locked
1225  * - folio_test_lru false
1226  * - folio_memcg_lock()
1227  * - folio frozen (refcount of 0)
1228  *
1229  * Return: The lruvec this folio is on with its lock held and interrupts
1230  * disabled.
1231  */
1232 struct lruvec *folio_lruvec_lock_irq(struct folio *folio)
1233 {
1234         struct lruvec *lruvec = folio_lruvec(folio);
1235
1236         spin_lock_irq(&lruvec->lru_lock);
1237         lruvec_memcg_debug(lruvec, folio);
1238
1239         return lruvec;
1240 }
1241
1242 /**
1243  * folio_lruvec_lock_irqsave - Lock the lruvec for a folio.
1244  * @folio: Pointer to the folio.
1245  * @flags: Pointer to irqsave flags.
1246  *
1247  * These functions are safe to use under any of the following conditions:
1248  * - folio locked
1249  * - folio_test_lru false
1250  * - folio_memcg_lock()
1251  * - folio frozen (refcount of 0)
1252  *
1253  * Return: The lruvec this folio is on with its lock held and interrupts
1254  * disabled.
1255  */
1256 struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
1257                 unsigned long *flags)
1258 {
1259         struct lruvec *lruvec = folio_lruvec(folio);
1260
1261         spin_lock_irqsave(&lruvec->lru_lock, *flags);
1262         lruvec_memcg_debug(lruvec, folio);
1263
1264         return lruvec;
1265 }
1266
1267 /**
1268  * mem_cgroup_update_lru_size - account for adding or removing an lru page
1269  * @lruvec: mem_cgroup per zone lru vector
1270  * @lru: index of lru list the page is sitting on
1271  * @zid: zone id of the accounted pages
1272  * @nr_pages: positive when adding or negative when removing
1273  *
1274  * This function must be called under lru_lock, just before a page is added
1275  * to or just after a page is removed from an lru list.
1276  */
1277 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1278                                 int zid, int nr_pages)
1279 {
1280         struct mem_cgroup_per_node *mz;
1281         unsigned long *lru_size;
1282         long size;
1283
1284         if (mem_cgroup_disabled())
1285                 return;
1286
1287         mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1288         lru_size = &mz->lru_zone_size[zid][lru];
1289
1290         if (nr_pages < 0)
1291                 *lru_size += nr_pages;
1292
1293         size = *lru_size;
1294         if (WARN_ONCE(size < 0,
1295                 "%s(%p, %d, %d): lru_size %ld\n",
1296                 __func__, lruvec, lru, nr_pages, size)) {
1297                 VM_BUG_ON(1);
1298                 *lru_size = 0;
1299         }
1300
1301         if (nr_pages > 0)
1302                 *lru_size += nr_pages;
1303 }
1304
1305 /**
1306  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1307  * @memcg: the memory cgroup
1308  *
1309  * Returns the maximum amount of memory @memcg can be charged with, in
1310  * pages.
1311  */
1312 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1313 {
1314         unsigned long margin = 0;
1315         unsigned long count;
1316         unsigned long limit;
1317
1318         count = page_counter_read(&memcg->memory);
1319         limit = READ_ONCE(memcg->memory.max);
1320         if (count < limit)
1321                 margin = limit - count;
1322
1323         if (do_memsw_account()) {
1324                 count = page_counter_read(&memcg->memsw);
1325                 limit = READ_ONCE(memcg->memsw.max);
1326                 if (count < limit)
1327                         margin = min(margin, limit - count);
1328                 else
1329                         margin = 0;
1330         }
1331
1332         return margin;
1333 }
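
/*
 * Worked example (illustrative): with usage at 768 pages and memory.max
 * at 1024 pages, the margin is 256 pages.  If legacy memsw accounting is
 * active with memsw usage at 1000 of 1100 pages, the margin shrinks to
 * min(256, 100) == 100 pages.
 */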
1334
1335 /*
1336  * A routine for checking whether "memcg" is under move_account() or not.
1337  *
1338  * Checks whether a cgroup is mc.from, mc.to, or under the hierarchy of
1339  * the moving cgroups. This is for waiting at high memory pressure
1340  * caused by "move".
1341  */
1342 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1343 {
1344         struct mem_cgroup *from;
1345         struct mem_cgroup *to;
1346         bool ret = false;
1347         /*
1348          * Unlike the task_move routines, we access mc.to and mc.from without
1349          * mutual exclusion by cgroup_mutex; here, we take the spinlock instead.
1350          */
1351         spin_lock(&mc.lock);
1352         from = mc.from;
1353         to = mc.to;
1354         if (!from)
1355                 goto unlock;
1356
1357         ret = mem_cgroup_is_descendant(from, memcg) ||
1358                 mem_cgroup_is_descendant(to, memcg);
1359 unlock:
1360         spin_unlock(&mc.lock);
1361         return ret;
1362 }
1363
1364 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1365 {
1366         if (mc.moving_task && current != mc.moving_task) {
1367                 if (mem_cgroup_under_move(memcg)) {
1368                         DEFINE_WAIT(wait);
1369                         prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1370                         /* moving charge context might have finished. */
1371                         if (mc.moving_task)
1372                                 schedule();
1373                         finish_wait(&mc.waitq, &wait);
1374                         return true;
1375                 }
1376         }
1377         return false;
1378 }
1379
1380 struct memory_stat {
1381         const char *name;
1382         unsigned int idx;
1383 };
1384
1385 static const struct memory_stat memory_stats[] = {
1386         { "anon",                       NR_ANON_MAPPED                  },
1387         { "file",                       NR_FILE_PAGES                   },
1388         { "kernel",                     MEMCG_KMEM                      },
1389         { "kernel_stack",               NR_KERNEL_STACK_KB              },
1390         { "pagetables",                 NR_PAGETABLE                    },
1391         { "percpu",                     MEMCG_PERCPU_B                  },
1392         { "sock",                       MEMCG_SOCK                      },
1393         { "vmalloc",                    MEMCG_VMALLOC                   },
1394         { "shmem",                      NR_SHMEM                        },
1395         { "file_mapped",                NR_FILE_MAPPED                  },
1396         { "file_dirty",                 NR_FILE_DIRTY                   },
1397         { "file_writeback",             NR_WRITEBACK                    },
1398 #ifdef CONFIG_SWAP
1399         { "swapcached",                 NR_SWAPCACHE                    },
1400 #endif
1401 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1402         { "anon_thp",                   NR_ANON_THPS                    },
1403         { "file_thp",                   NR_FILE_THPS                    },
1404         { "shmem_thp",                  NR_SHMEM_THPS                   },
1405 #endif
1406         { "inactive_anon",              NR_INACTIVE_ANON                },
1407         { "active_anon",                NR_ACTIVE_ANON                  },
1408         { "inactive_file",              NR_INACTIVE_FILE                },
1409         { "active_file",                NR_ACTIVE_FILE                  },
1410         { "unevictable",                NR_UNEVICTABLE                  },
1411         { "slab_reclaimable",           NR_SLAB_RECLAIMABLE_B           },
1412         { "slab_unreclaimable",         NR_SLAB_UNRECLAIMABLE_B         },
1413
1414         /* The memory events */
1415         { "workingset_refault_anon",    WORKINGSET_REFAULT_ANON         },
1416         { "workingset_refault_file",    WORKINGSET_REFAULT_FILE         },
1417         { "workingset_activate_anon",   WORKINGSET_ACTIVATE_ANON        },
1418         { "workingset_activate_file",   WORKINGSET_ACTIVATE_FILE        },
1419         { "workingset_restore_anon",    WORKINGSET_RESTORE_ANON         },
1420         { "workingset_restore_file",    WORKINGSET_RESTORE_FILE         },
1421         { "workingset_nodereclaim",     WORKINGSET_NODERECLAIM          },
1422 };
1423
1424 /* Translate stat items to the correct unit for memory.stat output */
1425 static int memcg_page_state_unit(int item)
1426 {
1427         switch (item) {
1428         case MEMCG_PERCPU_B:
1429         case NR_SLAB_RECLAIMABLE_B:
1430         case NR_SLAB_UNRECLAIMABLE_B:
1431         case WORKINGSET_REFAULT_ANON:
1432         case WORKINGSET_REFAULT_FILE:
1433         case WORKINGSET_ACTIVATE_ANON:
1434         case WORKINGSET_ACTIVATE_FILE:
1435         case WORKINGSET_RESTORE_ANON:
1436         case WORKINGSET_RESTORE_FILE:
1437         case WORKINGSET_NODERECLAIM:
1438                 return 1;
1439         case NR_KERNEL_STACK_KB:
1440                 return SZ_1K;
1441         default:
1442                 return PAGE_SIZE;
1443         }
1444 }
1445
1446 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1447                                                     int item)
1448 {
1449         return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
1450 }
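
/*
 * Worked example (illustrative): a "kernel_stack" state value of 16 is
 * tracked in KiB, so memcg_page_state_output() scales it by SZ_1K to
 * 16384 bytes; an "anon" value of 10 is tracked in pages and becomes
 * 10 * PAGE_SIZE bytes; byte-based and event-like items (unit 1) are
 * emitted as-is.
 */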
1451
1452 static char *memory_stat_format(struct mem_cgroup *memcg)
1453 {
1454         struct seq_buf s;
1455         int i;
1456
1457         seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1458         if (!s.buffer)
1459                 return NULL;
1460
1461         /*
1462          * Provide statistics on the state of the memory subsystem as
1463          * well as cumulative event counters that show past behavior.
1464          *
1465          * This list is ordered following a combination of these gradients:
1466          * 1) generic big picture -> specifics and details
1467          * 2) reflecting userspace activity -> reflecting kernel heuristics
1468          *
1469          * Current memory state:
1470          */
1471         mem_cgroup_flush_stats();
1472
1473         for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1474                 u64 size;
1475
1476                 size = memcg_page_state_output(memcg, memory_stats[i].idx);
1477                 seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
1478
1479                 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1480                         size += memcg_page_state_output(memcg,
1481                                                         NR_SLAB_RECLAIMABLE_B);
1482                         seq_buf_printf(&s, "slab %llu\n", size);
1483                 }
1484         }
1485
1486         /* Accumulated memory events */
1487
1488         seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
1489                        memcg_events(memcg, PGFAULT));
1490         seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
1491                        memcg_events(memcg, PGMAJFAULT));
1492         seq_buf_printf(&s, "%s %lu\n",  vm_event_name(PGREFILL),
1493                        memcg_events(memcg, PGREFILL));
1494         seq_buf_printf(&s, "pgscan %lu\n",
1495                        memcg_events(memcg, PGSCAN_KSWAPD) +
1496                        memcg_events(memcg, PGSCAN_DIRECT));
1497         seq_buf_printf(&s, "pgsteal %lu\n",
1498                        memcg_events(memcg, PGSTEAL_KSWAPD) +
1499                        memcg_events(memcg, PGSTEAL_DIRECT));
1500         seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1501                        memcg_events(memcg, PGACTIVATE));
1502         seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1503                        memcg_events(memcg, PGDEACTIVATE));
1504         seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1505                        memcg_events(memcg, PGLAZYFREE));
1506         seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1507                        memcg_events(memcg, PGLAZYFREED));
1508
1509 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1510         seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
1511                        memcg_events(memcg, THP_FAULT_ALLOC));
1512         seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
1513                        memcg_events(memcg, THP_COLLAPSE_ALLOC));
1514 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1515
1516         /* The above should easily fit into one page */
1517         WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1518
1519         return s.buffer;
1520 }
1521
1522 #define K(x) ((x) << (PAGE_SHIFT-10))
1523 /**
1524  * mem_cgroup_print_oom_context: Print OOM information relevant to
1525  * memory controller.
1526  * @memcg: The memory cgroup that went over limit
1527  * @p: Task that is going to be killed
1528  *
1529  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1530  * enabled
1531  */
1532 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1533 {
1534         rcu_read_lock();
1535
1536         if (memcg) {
1537                 pr_cont(",oom_memcg=");
1538                 pr_cont_cgroup_path(memcg->css.cgroup);
1539         } else
1540                 pr_cont(",global_oom");
1541         if (p) {
1542                 pr_cont(",task_memcg=");
1543                 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1544         }
1545         rcu_read_unlock();
1546 }
1547
1548 /**
1549  * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1550  * the memory controller.
1551  * @memcg: The memory cgroup that went over limit
1552  */
1553 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1554 {
1555         char *buf;
1556
1557         pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1558                 K((u64)page_counter_read(&memcg->memory)),
1559                 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1560         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1561                 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1562                         K((u64)page_counter_read(&memcg->swap)),
1563                         K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1564         else {
1565                 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1566                         K((u64)page_counter_read(&memcg->memsw)),
1567                         K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1568                 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1569                         K((u64)page_counter_read(&memcg->kmem)),
1570                         K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1571         }
1572
1573         pr_info("Memory cgroup stats for ");
1574         pr_cont_cgroup_path(memcg->css.cgroup);
1575         pr_cont(":");
1576         buf = memory_stat_format(memcg);
1577         if (!buf)
1578                 return;
1579         pr_info("%s", buf);
1580         kfree(buf);
1581 }
1582
1583 /*
1584  * Return the memory (and swap, if configured) limit for a memcg.
1585  */
1586 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1587 {
1588         unsigned long max = READ_ONCE(memcg->memory.max);
1589
1590         if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
1591                 if (mem_cgroup_swappiness(memcg))
1592                         max += min(READ_ONCE(memcg->swap.max),
1593                                    (unsigned long)total_swap_pages);
1594         } else { /* v1 */
1595                 if (mem_cgroup_swappiness(memcg)) {
1596                         /* Calculate swap excess capacity from memsw limit */
1597                         unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1598
1599                         max += min(swap, (unsigned long)total_swap_pages);
1600                 }
1601         }
1602         return max;
1603 }
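/*
 * Worked example (illustrative numbers, assuming 4K pages and ample
 * total_swap_pages): on cgroup v2 with memory.max == 1G (262144 pages),
 * swap.max == 512M (131072 pages) and non-zero swappiness, this returns
 * 262144 + 131072 pages. On v1 with a 1G memory limit and a 1.5G memsw
 * limit, the swap excess is memsw.max - max == 131072 pages, giving the
 * same total.
 */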
1604
1605 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1606 {
1607         return page_counter_read(&memcg->memory);
1608 }
1609
1610 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1611                                      int order)
1612 {
1613         struct oom_control oc = {
1614                 .zonelist = NULL,
1615                 .nodemask = NULL,
1616                 .memcg = memcg,
1617                 .gfp_mask = gfp_mask,
1618                 .order = order,
1619         };
1620         bool ret = true;
1621
1622         if (mutex_lock_killable(&oom_lock))
1623                 return true;
1624
1625         if (mem_cgroup_margin(memcg) >= (1 << order))
1626                 goto unlock;
1627
1628         /*
1629          * A few threads which were not waiting at mutex_lock_killable() can
1630          * fail to bail out. Therefore, check again after holding oom_lock.
1631          */
1632         ret = task_is_dying() || out_of_memory(&oc);
1633
1634 unlock:
1635         mutex_unlock(&oom_lock);
1636         return ret;
1637 }
1638
1639 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1640                                    pg_data_t *pgdat,
1641                                    gfp_t gfp_mask,
1642                                    unsigned long *total_scanned)
1643 {
1644         struct mem_cgroup *victim = NULL;
1645         int total = 0;
1646         int loop = 0;
1647         unsigned long excess;
1648         unsigned long nr_scanned;
1649         struct mem_cgroup_reclaim_cookie reclaim = {
1650                 .pgdat = pgdat,
1651         };
1652
1653         excess = soft_limit_excess(root_memcg);
1654
1655         while (1) {
1656                 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1657                 if (!victim) {
1658                         loop++;
1659                         if (loop >= 2) {
1660                                 /*
1661                                  * If we have not been able to reclaim
1662                                  * anything, it might because there are
1663                                  * anything, it might be because there are
1664                                  */
1665                                 if (!total)
1666                                         break;
1667                                 /*
1668                                  * We want to do more targeted reclaim.
1669                                  * excess >> 2 is neither so large that we
1670                                  * reclaim too much, nor so small that we keep
1671                                  * coming back to reclaim from this cgroup
1672                                  */
1673                                 if (total >= (excess >> 2) ||
1674                                         (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1675                                         break;
1676                         }
1677                         continue;
1678                 }
1679                 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1680                                         pgdat, &nr_scanned);
1681                 *total_scanned += nr_scanned;
1682                 if (!soft_limit_excess(root_memcg))
1683                         break;
1684         }
1685         mem_cgroup_iter_break(root_memcg, victim);
1686         return total;
1687 }
1688
1689 #ifdef CONFIG_LOCKDEP
1690 static struct lockdep_map memcg_oom_lock_dep_map = {
1691         .name = "memcg_oom_lock",
1692 };
1693 #endif
1694
1695 static DEFINE_SPINLOCK(memcg_oom_lock);
1696
1697 /*
1698  * Check whether the OOM killer is already running under our hierarchy.
1699  * If someone else is running it, return false.
1700  */
1701 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1702 {
1703         struct mem_cgroup *iter, *failed = NULL;
1704
1705         spin_lock(&memcg_oom_lock);
1706
1707         for_each_mem_cgroup_tree(iter, memcg) {
1708                 if (iter->oom_lock) {
1709                         /*
1710                          * This subtree of our hierarchy is already locked,
1711                          * so we cannot take the lock.
1712                          */
1713                         failed = iter;
1714                         mem_cgroup_iter_break(memcg, iter);
1715                         break;
1716                 } else
1717                         iter->oom_lock = true;
1718         }
1719
1720         if (failed) {
1721                 /*
1722                  * OK, we failed to lock the whole subtree, so we have
1723                  * to clean up what we already set up, up to the failing cgroup
1724                  */
1725                 for_each_mem_cgroup_tree(iter, memcg) {
1726                         if (iter == failed) {
1727                                 mem_cgroup_iter_break(memcg, iter);
1728                                 break;
1729                         }
1730                         iter->oom_lock = false;
1731                 }
1732         } else
1733                 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1734
1735         spin_unlock(&memcg_oom_lock);
1736
1737         return !failed;
1738 }
1739
1740 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1741 {
1742         struct mem_cgroup *iter;
1743
1744         spin_lock(&memcg_oom_lock);
1745         mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1746         for_each_mem_cgroup_tree(iter, memcg)
1747                 iter->oom_lock = false;
1748         spin_unlock(&memcg_oom_lock);
1749 }
1750
1751 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1752 {
1753         struct mem_cgroup *iter;
1754
1755         spin_lock(&memcg_oom_lock);
1756         for_each_mem_cgroup_tree(iter, memcg)
1757                 iter->under_oom++;
1758         spin_unlock(&memcg_oom_lock);
1759 }
1760
1761 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1762 {
1763         struct mem_cgroup *iter;
1764
1765         /*
1766          * Be careful about under_oom underflows because a child memcg
1767          * could have been added after mem_cgroup_mark_under_oom.
1768          */
1769         spin_lock(&memcg_oom_lock);
1770         for_each_mem_cgroup_tree(iter, memcg)
1771                 if (iter->under_oom > 0)
1772                         iter->under_oom--;
1773         spin_unlock(&memcg_oom_lock);
1774 }
1775
1776 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1777
1778 struct oom_wait_info {
1779         struct mem_cgroup *memcg;
1780         wait_queue_entry_t      wait;
1781 };
1782
1783 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1784         unsigned mode, int sync, void *arg)
1785 {
1786         struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1787         struct mem_cgroup *oom_wait_memcg;
1788         struct oom_wait_info *oom_wait_info;
1789
1790         oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1791         oom_wait_memcg = oom_wait_info->memcg;
1792
1793         if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1794             !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1795                 return 0;
1796         return autoremove_wake_function(wait, mode, sync, arg);
1797 }
1798
1799 static void memcg_oom_recover(struct mem_cgroup *memcg)
1800 {
1801         /*
1802          * For the following lockless ->under_oom test, the only required
1803          * guarantee is that it must see the state asserted by an OOM when
1804          * this function is called as a result of userland actions
1805          * triggered by the notification of the OOM.  This is trivially
1806          * achieved by invoking mem_cgroup_mark_under_oom() before
1807          * triggering notification.
1808          */
1809         if (memcg && memcg->under_oom)
1810                 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1811 }
1812
1813 /*
1814  * Returns true if we successfully killed one or more processes, though in
1815  * some corner cases it can return true even without killing any process.
1816  */
1817 static bool mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1818 {
1819         bool locked, ret;
1820
1821         if (order > PAGE_ALLOC_COSTLY_ORDER)
1822                 return false;
1823
1824         memcg_memory_event(memcg, MEMCG_OOM);
1825
1826         /*
1827          * We are in the middle of the charge context here, so we
1828          * don't want to block when potentially sitting on a callstack
1829          * that holds all kinds of filesystem and mm locks.
1830          *
1831          * cgroup1 allows disabling the OOM killer and waiting for outside
1832          * handling until the charge can succeed; remember the context and put
1833          * the task to sleep at the end of the page fault when all locks are
1834          * released.
1835          *
1836          * On the other hand, in-kernel OOM killer allows for an async victim
1837          * memory reclaim (oom_reaper) and that means that we are not solely
1838          * relying on the oom victim to make a forward progress and we can
1839          * invoke the oom killer here.
1840          *
1841          * Please note that mem_cgroup_out_of_memory might fail to find a
1842          * victim and then we have to bail out from the charge path.
1843          */
1844         if (memcg->oom_kill_disable) {
1845                 if (current->in_user_fault) {
1846                         css_get(&memcg->css);
1847                         current->memcg_in_oom = memcg;
1848                         current->memcg_oom_gfp_mask = mask;
1849                         current->memcg_oom_order = order;
1850                 }
1851                 return false;
1852         }
1853
1854         mem_cgroup_mark_under_oom(memcg);
1855
1856         locked = mem_cgroup_oom_trylock(memcg);
1857
1858         if (locked)
1859                 mem_cgroup_oom_notify(memcg);
1860
1861         mem_cgroup_unmark_under_oom(memcg);
1862         ret = mem_cgroup_out_of_memory(memcg, mask, order);
1863
1864         if (locked)
1865                 mem_cgroup_oom_unlock(memcg);
1866
1867         return ret;
1868 }
1869
1870 /**
1871  * mem_cgroup_oom_synchronize - complete memcg OOM handling
1872  * @handle: actually kill/wait or just clean up the OOM state
1873  *
1874  * This has to be called at the end of a page fault if the memcg OOM
1875  * handler was enabled.
1876  *
1877  * Memcg supports userspace OOM handling where failed allocations must
1878  * sleep on a waitqueue until the userspace task resolves the
1879  * situation.  Sleeping directly in the charge context with all kinds
1880  * of locks held is not a good idea, instead we remember an OOM state
1881  * in the task and mem_cgroup_oom_synchronize() has to be called at
1882  * the end of the page fault to complete the OOM handling.
1883  *
1884  * Returns %true if an ongoing memcg OOM situation was detected and
1885  * completed, %false otherwise.
1886  */
1887 bool mem_cgroup_oom_synchronize(bool handle)
1888 {
1889         struct mem_cgroup *memcg = current->memcg_in_oom;
1890         struct oom_wait_info owait;
1891         bool locked;
1892
1893         /* OOM is global, do not handle */
1894         if (!memcg)
1895                 return false;
1896
1897         if (!handle)
1898                 goto cleanup;
1899
1900         owait.memcg = memcg;
1901         owait.wait.flags = 0;
1902         owait.wait.func = memcg_oom_wake_function;
1903         owait.wait.private = current;
1904         INIT_LIST_HEAD(&owait.wait.entry);
1905
1906         prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1907         mem_cgroup_mark_under_oom(memcg);
1908
1909         locked = mem_cgroup_oom_trylock(memcg);
1910
1911         if (locked)
1912                 mem_cgroup_oom_notify(memcg);
1913
1914         if (locked && !memcg->oom_kill_disable) {
1915                 mem_cgroup_unmark_under_oom(memcg);
1916                 finish_wait(&memcg_oom_waitq, &owait.wait);
1917                 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1918                                          current->memcg_oom_order);
1919         } else {
1920                 schedule();
1921                 mem_cgroup_unmark_under_oom(memcg);
1922                 finish_wait(&memcg_oom_waitq, &owait.wait);
1923         }
1924
1925         if (locked) {
1926                 mem_cgroup_oom_unlock(memcg);
1927                 /*
1928                  * There is no guarantee that an OOM-lock contender
1929                  * sees the wakeups triggered by the OOM kill
1930                  * uncharges.  Wake any sleepers explicitly.
1931                  */
1932                 memcg_oom_recover(memcg);
1933         }
1934 cleanup:
1935         current->memcg_in_oom = NULL;
1936         css_put(&memcg->css);
1937         return true;
1938 }
1939
1940 /**
1941  * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1942  * @victim: task to be killed by the OOM killer
1943  * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1944  *
1945  * Returns a pointer to a memory cgroup, which has to be cleaned up
1946  * by killing all OOM-killable tasks that belong to it.
1947  *
1948  * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1949  */
1950 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1951                                             struct mem_cgroup *oom_domain)
1952 {
1953         struct mem_cgroup *oom_group = NULL;
1954         struct mem_cgroup *memcg;
1955
1956         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1957                 return NULL;
1958
1959         if (!oom_domain)
1960                 oom_domain = root_mem_cgroup;
1961
1962         rcu_read_lock();
1963
1964         memcg = mem_cgroup_from_task(victim);
1965         if (memcg == root_mem_cgroup)
1966                 goto out;
1967
1968         /*
1969          * If the victim task has been asynchronously moved to a different
1970          * memory cgroup, we might end up killing tasks outside oom_domain.
1971          * In this case it's better to ignore memory.group.oom.
1972          */
1973         if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1974                 goto out;
1975
1976         /*
1977          * Traverse the memory cgroup hierarchy from the victim task's
1978          * cgroup up to the OOMing cgroup (or root) to find the
1979          * highest-level memory cgroup with oom.group set.
1980          */
1981         for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1982                 if (memcg->oom_group)
1983                         oom_group = memcg;
1984
1985                 if (memcg == oom_domain)
1986                         break;
1987         }
1988
1989         if (oom_group)
1990                 css_get(&oom_group->css);
1991 out:
1992         rcu_read_unlock();
1993
1994         return oom_group;
1995 }
1996
1997 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1998 {
1999         pr_info("Tasks in ");
2000         pr_cont_cgroup_path(memcg->css.cgroup);
2001         pr_cont(" are going to be killed due to memory.oom.group set\n");
2002 }
2003
2004 /**
2005  * folio_memcg_lock - Bind a folio to its memcg.
2006  * @folio: The folio.
2007  *
2008  * This function prevents unlocked LRU folios from being moved to
2009  * another cgroup.
2010  *
2011  * It ensures lifetime of the bound memcg.  The caller is responsible
2012  * for the lifetime of the folio.
2013  */
2014 void folio_memcg_lock(struct folio *folio)
2015 {
2016         struct mem_cgroup *memcg;
2017         unsigned long flags;
2018
2019         /*
2020          * The RCU lock is held throughout the transaction.  The fast
2021          * path can get away without acquiring the memcg->move_lock
2022          * because page moving starts with an RCU grace period.
2023          */
2024         rcu_read_lock();
2025
2026         if (mem_cgroup_disabled())
2027                 return;
2028 again:
2029         memcg = folio_memcg(folio);
2030         if (unlikely(!memcg))
2031                 return;
2032
2033 #ifdef CONFIG_PROVE_LOCKING
2034         local_irq_save(flags);
2035         might_lock(&memcg->move_lock);
2036         local_irq_restore(flags);
2037 #endif
2038
2039         if (atomic_read(&memcg->moving_account) <= 0)
2040                 return;
2041
2042         spin_lock_irqsave(&memcg->move_lock, flags);
2043         if (memcg != folio_memcg(folio)) {
2044                 spin_unlock_irqrestore(&memcg->move_lock, flags);
2045                 goto again;
2046         }
2047
2048         /*
2049          * When charge migration first begins, we can have multiple
2050          * critical sections holding the fast-path RCU lock and one
2051          * holding the slowpath move_lock. Track the task that has the
2052          * move_lock for unlock_page_memcg().
2053          */
2054         memcg->move_lock_task = current;
2055         memcg->move_lock_flags = flags;
2056 }
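/*
 * Typical usage sketch (the caller is responsible for the folio's
 * lifetime, e.g. via a reference):
 *
 *	folio_memcg_lock(folio);
 *	... update state that must not race with the folio moving
 *	    to another memcg ...
 *	folio_memcg_unlock(folio);
 *
 * Note that the fast paths above return with only the RCU read lock
 * held; folio_memcg_unlock() drops it in either case.
 */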
2057
2058 void lock_page_memcg(struct page *page)
2059 {
2060         folio_memcg_lock(page_folio(page));
2061 }
2062
2063 static void __folio_memcg_unlock(struct mem_cgroup *memcg)
2064 {
2065         if (memcg && memcg->move_lock_task == current) {
2066                 unsigned long flags = memcg->move_lock_flags;
2067
2068                 memcg->move_lock_task = NULL;
2069                 memcg->move_lock_flags = 0;
2070
2071                 spin_unlock_irqrestore(&memcg->move_lock, flags);
2072         }
2073
2074         rcu_read_unlock();
2075 }
2076
2077 /**
2078  * folio_memcg_unlock - Release the binding between a folio and its memcg.
2079  * @folio: The folio.
2080  *
2081  * This releases the binding created by folio_memcg_lock().  This does
2082  * not change the accounting of this folio to its memcg, but it does
2083  * permit others to change it.
2084  */
2085 void folio_memcg_unlock(struct folio *folio)
2086 {
2087         __folio_memcg_unlock(folio_memcg(folio));
2088 }
2089
2090 void unlock_page_memcg(struct page *page)
2091 {
2092         folio_memcg_unlock(page_folio(page));
2093 }
2094
2095 struct memcg_stock_pcp {
2096         local_lock_t stock_lock;
2097         struct mem_cgroup *cached; /* must never be a root cgroup */
2098         unsigned int nr_pages;
2099
2100 #ifdef CONFIG_MEMCG_KMEM
2101         struct obj_cgroup *cached_objcg;
2102         struct pglist_data *cached_pgdat;
2103         unsigned int nr_bytes;
2104         int nr_slab_reclaimable_b;
2105         int nr_slab_unreclaimable_b;
2106 #endif
2107
2108         struct work_struct work;
2109         unsigned long flags;
2110 #define FLUSHING_CACHED_CHARGE  0
2111 };
2112 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock) = {
2113         .stock_lock = INIT_LOCAL_LOCK(stock_lock),
2114 };
2115 static DEFINE_MUTEX(percpu_charge_mutex);
2116
2117 #ifdef CONFIG_MEMCG_KMEM
2118 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock);
2119 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2120                                      struct mem_cgroup *root_memcg);
2121 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages);
2122
2123 #else
2124 static inline struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
2125 {
2126         return NULL;
2127 }
2128 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2129                                      struct mem_cgroup *root_memcg)
2130 {
2131         return false;
2132 }
2133 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2134 {
2135 }
2136 #endif
2137
2138 /**
2139  * consume_stock: Try to consume stocked charge on this cpu.
2140  * @memcg: memcg to consume from.
2141  * @nr_pages: how many pages to charge.
2142  *
2143  * The charges will only happen if @memcg matches the current cpu's memcg
2144  * stock, and at least @nr_pages are available in that stock.  Failure to
2145  * service an allocation will refill the stock.
2146  *
2147  * Returns true if successful, false otherwise.
2148  */
2149 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2150 {
2151         struct memcg_stock_pcp *stock;
2152         unsigned long flags;
2153         bool ret = false;
2154
2155         if (nr_pages > MEMCG_CHARGE_BATCH)
2156                 return ret;
2157
2158         local_lock_irqsave(&memcg_stock.stock_lock, flags);
2159
2160         stock = this_cpu_ptr(&memcg_stock);
2161         if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2162                 stock->nr_pages -= nr_pages;
2163                 ret = true;
2164         }
2165
2166         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2167
2168         return ret;
2169 }
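/*
 * Sketch of the fast path: if the local stock already caches, say, 32
 * pre-charged pages for this memcg, a subsequent 8-page charge is served
 * from the stock without touching the page counters at all; only charges
 * larger than MEMCG_CHARGE_BATCH always fall through to the slow path.
 */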
2170
2171 /*
2172  * Return cached charges to the counters (uncharge) and reset the cached information.
2173  */
2174 static void drain_stock(struct memcg_stock_pcp *stock)
2175 {
2176         struct mem_cgroup *old = stock->cached;
2177
2178         if (!old)
2179                 return;
2180
2181         if (stock->nr_pages) {
2182                 page_counter_uncharge(&old->memory, stock->nr_pages);
2183                 if (do_memsw_account())
2184                         page_counter_uncharge(&old->memsw, stock->nr_pages);
2185                 stock->nr_pages = 0;
2186         }
2187
2188         css_put(&old->css);
2189         stock->cached = NULL;
2190 }
2191
2192 static void drain_local_stock(struct work_struct *dummy)
2193 {
2194         struct memcg_stock_pcp *stock;
2195         struct obj_cgroup *old = NULL;
2196         unsigned long flags;
2197
2198         /*
2199          * The only protection from cpu hotplug (memcg_hotplug_cpu_dead) vs.
2200          * drain_stock races is that we always operate on local CPU stock
2201          * here with IRQs disabled.
2202          */
2203         local_lock_irqsave(&memcg_stock.stock_lock, flags);
2204
2205         stock = this_cpu_ptr(&memcg_stock);
2206         old = drain_obj_stock(stock);
2207         drain_stock(stock);
2208         clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2209
2210         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2211         if (old)
2212                 obj_cgroup_put(old);
2213 }
2214
2215 /*
2216  * Cache charges (nr_pages) in the local per-cpu area.
2217  * They will be consumed by consume_stock() later.
2218  */
2219 static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2220 {
2221         struct memcg_stock_pcp *stock;
2222
2223         stock = this_cpu_ptr(&memcg_stock);
2224         if (stock->cached != memcg) { /* reset if necessary */
2225                 drain_stock(stock);
2226                 css_get(&memcg->css);
2227                 stock->cached = memcg;
2228         }
2229         stock->nr_pages += nr_pages;
2230
2231         if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2232                 drain_stock(stock);
2233 }
2234
2235 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2236 {
2237         unsigned long flags;
2238
2239         local_lock_irqsave(&memcg_stock.stock_lock, flags);
2240         __refill_stock(memcg, nr_pages);
2241         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
2242 }
2243
2244 /*
2245  * Drain all per-CPU charge caches for the given root_memcg, i.e. the
2246  * whole subtree of the hierarchy under it.
2247  */
2248 static void drain_all_stock(struct mem_cgroup *root_memcg)
2249 {
2250         int cpu, curcpu;
2251
2252         /* If someone's already draining, avoid running more workers. */
2253         if (!mutex_trylock(&percpu_charge_mutex))
2254                 return;
2255         /*
2256          * Notify other cpus that a system-wide "drain" is running.
2257          * We do not care about races with the cpu hotplug because cpu down
2258          * as well as workers from this path always operate on the local
2259          * per-cpu data. CPU up doesn't touch memcg_stock at all.
2260          */
2261         migrate_disable();
2262         curcpu = smp_processor_id();
2263         for_each_online_cpu(cpu) {
2264                 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2265                 struct mem_cgroup *memcg;
2266                 bool flush = false;
2267
2268                 rcu_read_lock();
2269                 memcg = stock->cached;
2270                 if (memcg && stock->nr_pages &&
2271                     mem_cgroup_is_descendant(memcg, root_memcg))
2272                         flush = true;
2273                 else if (obj_stock_flush_required(stock, root_memcg))
2274                         flush = true;
2275                 rcu_read_unlock();
2276
2277                 if (flush &&
2278                     !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2279                         if (cpu == curcpu)
2280                                 drain_local_stock(&stock->work);
2281                         else
2282                                 schedule_work_on(cpu, &stock->work);
2283                 }
2284         }
2285         migrate_enable();
2286         mutex_unlock(&percpu_charge_mutex);
2287 }
2288
2289 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2290 {
2291         struct memcg_stock_pcp *stock;
2292
2293         stock = &per_cpu(memcg_stock, cpu);
2294         drain_stock(stock);
2295
2296         return 0;
2297 }
2298
2299 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2300                                   unsigned int nr_pages,
2301                                   gfp_t gfp_mask)
2302 {
2303         unsigned long nr_reclaimed = 0;
2304
2305         do {
2306                 unsigned long pflags;
2307
2308                 if (page_counter_read(&memcg->memory) <=
2309                     READ_ONCE(memcg->memory.high))
2310                         continue;
2311
2312                 memcg_memory_event(memcg, MEMCG_HIGH);
2313
2314                 psi_memstall_enter(&pflags);
2315                 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2316                                                              gfp_mask, true);
2317                 psi_memstall_leave(&pflags);
2318         } while ((memcg = parent_mem_cgroup(memcg)) &&
2319                  !mem_cgroup_is_root(memcg));
2320
2321         return nr_reclaimed;
2322 }
2323
2324 static void high_work_func(struct work_struct *work)
2325 {
2326         struct mem_cgroup *memcg;
2327
2328         memcg = container_of(work, struct mem_cgroup, high_work);
2329         reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2330 }
2331
2332 /*
2333  * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2334  * enough to cause a significant slowdown in most cases, while still
2335  * allowing diagnostics and tracing to proceed without becoming stuck.
2336  */
2337 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2338
2339 /*
2340  * When calculating the delay, we use these either side of the exponentiation to
2341  * maintain precision and scale to a reasonable number of jiffies (see the table
2342  * below).
2343  *
2344  * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2345  *   overage ratio to a delay.
2346  * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2347  *   proposed penalty in order to reduce to a reasonable number of jiffies, and
2348  *   to produce a reasonable delay curve.
2349  *
2350  * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2351  * reasonable delay curve compared to precision-adjusted overage, not
2352  * penalising heavily at first, but still making sure that growth beyond the
2353  * limit penalises misbehaviour cgroups by slowing them down exponentially. For
2354  * limit penalises misbehaving cgroups by slowing them down exponentially. For
2355  *
2356  *  +-------+------------------------+
2357  *  | usage | time to allocate in ms |
2358  *  +-------+------------------------+
2359  *  | 100M  |                      0 |
2360  *  | 101M  |                      6 |
2361  *  | 102M  |                     25 |
2362  *  | 103M  |                     57 |
2363  *  | 104M  |                    102 |
2364  *  | 105M  |                    159 |
2365  *  | 106M  |                    230 |
2366  *  | 107M  |                    313 |
2367  *  | 108M  |                    409 |
2368  *  | 109M  |                    518 |
2369  *  | 110M  |                    639 |
2370  *  | 111M  |                    774 |
2371  *  | 112M  |                    921 |
2372  *  | 113M  |                   1081 |
2373  *  | 114M  |                   1254 |
2374  *  | 115M  |                   1439 |
2375  *  | 116M  |                   1638 |
2376  *  | 117M  |                   1849 |
2377  *  | 118M  |                   2000 |
2378  *  | 119M  |                   2000 |
2379  *  | 120M  |                   2000 |
2380  *  +-------+------------------------+
2381  */
2382 #define MEMCG_DELAY_PRECISION_SHIFT 20
2383 #define MEMCG_DELAY_SCALING_SHIFT 14
2384
2385 static u64 calculate_overage(unsigned long usage, unsigned long high)
2386 {
2387         u64 overage;
2388
2389         if (usage <= high)
2390                 return 0;
2391
2392         /*
2393          * Prevent division by 0 in overage calculation by acting as if
2394          * it was a threshold of 1 page
2395          */
2396         high = max(high, 1UL);
2397
2398         overage = usage - high;
2399         overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2400         return div64_u64(overage, high);
2401 }
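/*
 * Worked example (illustrative numbers): with high == 25600 pages (100M
 * with 4K pages) and usage == 28160 pages (110M), overage ==
 * (2560 << 20) / 25600 == 104857, i.e. ~0.1 in
 * MEMCG_DELAY_PRECISION_SHIFT fixed point.
 */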
2402
2403 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2404 {
2405         u64 overage, max_overage = 0;
2406
2407         do {
2408                 overage = calculate_overage(page_counter_read(&memcg->memory),
2409                                             READ_ONCE(memcg->memory.high));
2410                 max_overage = max(overage, max_overage);
2411         } while ((memcg = parent_mem_cgroup(memcg)) &&
2412                  !mem_cgroup_is_root(memcg));
2413
2414         return max_overage;
2415 }
2416
2417 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2418 {
2419         u64 overage, max_overage = 0;
2420
2421         do {
2422                 overage = calculate_overage(page_counter_read(&memcg->swap),
2423                                             READ_ONCE(memcg->swap.high));
2424                 if (overage)
2425                         memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2426                 max_overage = max(overage, max_overage);
2427         } while ((memcg = parent_mem_cgroup(memcg)) &&
2428                  !mem_cgroup_is_root(memcg));
2429
2430         return max_overage;
2431 }
2432
2433 /*
2434  * Get the number of jiffies that we should penalise a mischievous cgroup which
2435  * is exceeding its memory.high by checking both it and its ancestors.
2436  */
2437 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2438                                           unsigned int nr_pages,
2439                                           u64 max_overage)
2440 {
2441         unsigned long penalty_jiffies;
2442
2443         if (!max_overage)
2444                 return 0;
2445
2446         /*
2447          * We use overage compared to memory.high to calculate the number of
2448          * jiffies to sleep (penalty_jiffies). Ideally this value should be
2449          * fairly lenient on small overages, and increasingly harsh when the
2450          * memcg in question makes it clear that it has no intention of stopping
2451          * its crazy behaviour, so we exponentially increase the delay based on
2452          * overage amount.
2453          */
2454         penalty_jiffies = max_overage * max_overage * HZ;
2455         penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2456         penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2457
2458         /*
2459          * Factor in the task's own contribution to the overage, such that four
2460          * N-sized allocations are throttled approximately the same as one
2461          * 4N-sized allocation.
2462          *
2463          * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2464  * larger the current charge batch is than that.
2465          */
2466         return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
2467 }
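/*
 * Continuing the example above calculate_overage(): overage == 104857
 * (~0.1 << 20) yields penalty_jiffies == 104857^2 * HZ >> (20 + 14),
 * i.e. ~639 jiffies (~639ms at HZ=1000) before the nr_pages scaling --
 * matching the 110M row of the table above (assuming nr_pages ==
 * MEMCG_CHARGE_BATCH).
 */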
2468
2469 /*
2470  * Scheduled by try_charge() to be executed from the userland return path
2471  * and reclaims memory over the high limit.
2472  */
2473 void mem_cgroup_handle_over_high(void)
2474 {
2475         unsigned long penalty_jiffies;
2476         unsigned long pflags;
2477         unsigned long nr_reclaimed;
2478         unsigned int nr_pages = current->memcg_nr_pages_over_high;
2479         int nr_retries = MAX_RECLAIM_RETRIES;
2480         struct mem_cgroup *memcg;
2481         bool in_retry = false;
2482
2483         if (likely(!nr_pages))
2484                 return;
2485
2486         memcg = get_mem_cgroup_from_mm(current->mm);
2487         current->memcg_nr_pages_over_high = 0;
2488
2489 retry_reclaim:
2490         /*
2491          * The allocating task should reclaim at least the batch size, but for
2492          * subsequent retries we only want to do what's necessary to prevent oom
2493          * or breaching resource isolation.
2494          *
2495          * This is distinct from memory.max or page allocator behaviour because
2496          * memory.high is currently batched, whereas memory.max and the page
2497          * allocator run every time an allocation is made.
2498          */
2499         nr_reclaimed = reclaim_high(memcg,
2500                                     in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2501                                     GFP_KERNEL);
2502
2503         /*
2504          * memory.high is breached and reclaim is unable to keep up. Throttle
2505          * allocators proactively to slow down excessive growth.
2506          */
2507         penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2508                                                mem_find_max_overage(memcg));
2509
2510         penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2511                                                 swap_find_max_overage(memcg));
2512
2513         /*
2514          * Clamp the max delay per usermode return so as to still keep the
2515          * application moving forwards and also permit diagnostics, albeit
2516          * extremely slowly.
2517          */
2518         penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2519
2520         /*
2521          * Don't sleep if the amount of jiffies this memcg owes us is so low
2522          * that it's not even worth doing, in an attempt to be nice to those who
2523          * go only a small amount over their memory.high value and maybe haven't
2524          * been aggressively reclaimed enough yet.
2525          */
2526         if (penalty_jiffies <= HZ / 100)
2527                 goto out;
2528
2529         /*
2530          * If reclaim is making forward progress but we're still over
2531          * memory.high, we want to encourage that rather than doing allocator
2532          * throttling.
2533          */
2534         if (nr_reclaimed || nr_retries--) {
2535                 in_retry = true;
2536                 goto retry_reclaim;
2537         }
2538
2539         /*
2540          * If we exit early, we're guaranteed to die (since
2541          * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2542          * need to account for any ill-begotten jiffies to pay them off later.
2543          */
2544         psi_memstall_enter(&pflags);
2545         schedule_timeout_killable(penalty_jiffies);
2546         psi_memstall_leave(&pflags);
2547
2548 out:
2549         css_put(&memcg->css);
2550 }
2551
2552 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2553                         unsigned int nr_pages)
2554 {
2555         unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2556         int nr_retries = MAX_RECLAIM_RETRIES;
2557         struct mem_cgroup *mem_over_limit;
2558         struct page_counter *counter;
2559         unsigned long nr_reclaimed;
2560         bool passed_oom = false;
2561         bool may_swap = true;
2562         bool drained = false;
2563         unsigned long pflags;
2564
2565 retry:
2566         if (consume_stock(memcg, nr_pages))
2567                 return 0;
2568
2569         if (!do_memsw_account() ||
2570             page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2571                 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2572                         goto done_restock;
2573                 if (do_memsw_account())
2574                         page_counter_uncharge(&memcg->memsw, batch);
2575                 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2576         } else {
2577                 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2578                 may_swap = false;
2579         }
2580
2581         if (batch > nr_pages) {
2582                 batch = nr_pages;
2583                 goto retry;
2584         }
2585
2586         /*
2587          * Prevent unbounded recursion when reclaim operations need to
2588          * allocate memory. This might exceed the limits temporarily,
2589          * but we prefer facilitating memory reclaim and getting back
2590          * under the limit over triggering OOM kills in these cases.
2591          */
2592         if (unlikely(current->flags & PF_MEMALLOC))
2593                 goto force;
2594
2595         if (unlikely(task_in_memcg_oom(current)))
2596                 goto nomem;
2597
2598         if (!gfpflags_allow_blocking(gfp_mask))
2599                 goto nomem;
2600
2601         memcg_memory_event(mem_over_limit, MEMCG_MAX);
2602
2603         psi_memstall_enter(&pflags);
2604         nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2605                                                     gfp_mask, may_swap);
2606         psi_memstall_leave(&pflags);
2607
2608         if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2609                 goto retry;
2610
2611         if (!drained) {
2612                 drain_all_stock(mem_over_limit);
2613                 drained = true;
2614                 goto retry;
2615         }
2616
2617         if (gfp_mask & __GFP_NORETRY)
2618                 goto nomem;
2619         /*
2620          * Even though the limit is exceeded at this point, reclaim
2621          * may have been able to free some pages.  Retry the charge
2622          * before killing the task.
2623          *
2624          * Only for regular pages, though: huge pages are rather
2625          * unlikely to succeed so close to the limit, and we fall back
2626          * to regular pages anyway in case of failure.
2627          */
2628         if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2629                 goto retry;
2630         /*
2631          * At task move, charge accounts can be doubly counted. So, it's
2632          * better to wait until the end of task_move if something is going on.
2633          */
2634         if (mem_cgroup_wait_acct_move(mem_over_limit))
2635                 goto retry;
2636
2637         if (nr_retries--)
2638                 goto retry;
2639
2640         if (gfp_mask & __GFP_RETRY_MAYFAIL)
2641                 goto nomem;
2642
2643         /* Avoid endless loop for tasks bypassed by the oom killer */
2644         if (passed_oom && task_is_dying())
2645                 goto nomem;
2646
2647         /*
2648          * Keep retrying as long as the memcg OOM killer is able to make
2649          * forward progress, or bypass the charge if the OOM killer
2650          * couldn't make any progress.
2651          */
2652         if (mem_cgroup_oom(mem_over_limit, gfp_mask,
2653                            get_order(nr_pages * PAGE_SIZE))) {
2654                 passed_oom = true;
2655                 nr_retries = MAX_RECLAIM_RETRIES;
2656                 goto retry;
2657         }
2658 nomem:
2659         /*
2660          * Memcg doesn't have a dedicated reserve for atomic
2661          * allocations. But like the global atomic pool, we need to
2662          * put the burden of reclaim on regular allocation requests
2663          * and let these go through as privileged allocations.
2664          */
2665         if (!(gfp_mask & (__GFP_NOFAIL | __GFP_HIGH)))
2666                 return -ENOMEM;
2667 force:
2668         /*
2669          * The allocation either can't fail or will lead to more memory
2670          * being freed very soon.  Allow memory usage go over the limit
2671          * temporarily by force charging it.
2672          */
2673         page_counter_charge(&memcg->memory, nr_pages);
2674         if (do_memsw_account())
2675                 page_counter_charge(&memcg->memsw, nr_pages);
2676
2677         return 0;
2678
2679 done_restock:
2680         if (batch > nr_pages)
2681                 refill_stock(memcg, batch - nr_pages);
2682
2683         /*
2684          * If the hierarchy is above the normal consumption range, schedule
2685          * reclaim on returning to userland.  We can perform reclaim here
2686          * if __GFP_RECLAIM but let's always punt for simplicity and so that
2687          * GFP_KERNEL can consistently be used during reclaim.  @memcg is
2688          * not recorded as it most likely matches current's and won't
2689          * change in the meantime.  As high limit is checked again before
2690          * reclaim, the cost of mismatch is negligible.
2691          */
2692         do {
2693                 bool mem_high, swap_high;
2694
2695                 mem_high = page_counter_read(&memcg->memory) >
2696                         READ_ONCE(memcg->memory.high);
2697                 swap_high = page_counter_read(&memcg->swap) >
2698                         READ_ONCE(memcg->swap.high);
2699
2700                 /* Don't bother a random interrupted task */
2701                 if (!in_task()) {
2702                         if (mem_high) {
2703                                 schedule_work(&memcg->high_work);
2704                                 break;
2705                         }
2706                         continue;
2707                 }
2708
2709                 if (mem_high || swap_high) {
2710                         /*
2711                          * The allocating tasks in this cgroup will need to do
2712                          * reclaim or be throttled to prevent further growth
2713                          * of the memory or swap footprints.
2714                          *
2715                          * Target some best-effort fairness between the tasks,
2716                          * and distribute reclaim work and delay penalties
2717                          * based on how much each task is actually allocating.
2718                          */
2719                         current->memcg_nr_pages_over_high += batch;
2720                         set_notify_resume(current);
2721                         break;
2722                 }
2723         } while ((memcg = parent_mem_cgroup(memcg)));
2724
2725         if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
2726             !(current->flags & PF_MEMALLOC) &&
2727             gfpflags_allow_blocking(gfp_mask)) {
2728                 mem_cgroup_handle_over_high();
2729         }
2730         return 0;
2731 }
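/*
 * The retry ladder above, in rough pseudocode (a summary, not extra
 * behaviour):
 *
 *	if consume_stock()            -> done
 *	if the page counters fit      -> done_restock (refill + high checks)
 *	reclaim, drain stocks, and retry up to MAX_RECLAIM_RETRIES times
 *	invoke the memcg OOM killer   -> retry if it made progress
 *	else fail with -ENOMEM, unless __GFP_NOFAIL/__GFP_HIGH force the
 *	charge through over the limit
 */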
2732
2733 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2734                              unsigned int nr_pages)
2735 {
2736         if (mem_cgroup_is_root(memcg))
2737                 return 0;
2738
2739         return try_charge_memcg(memcg, gfp_mask, nr_pages);
2740 }
2741
2742 static inline void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2743 {
2744         if (mem_cgroup_is_root(memcg))
2745                 return;
2746
2747         page_counter_uncharge(&memcg->memory, nr_pages);
2748         if (do_memsw_account())
2749                 page_counter_uncharge(&memcg->memsw, nr_pages);
2750 }
2751
2752 static void commit_charge(struct folio *folio, struct mem_cgroup *memcg)
2753 {
2754         VM_BUG_ON_FOLIO(folio_memcg(folio), folio);
2755         /*
2756          * Any of the following ensures page's memcg stability:
2757          *
2758          * - the page lock
2759          * - LRU isolation
2760          * - lock_page_memcg()
2761          * - exclusive reference
2762          */
2763         folio->memcg_data = (unsigned long)memcg;
2764 }
2765
2766 #ifdef CONFIG_MEMCG_KMEM
2767 /*
2768  * The allocated objcg pointers array is not accounted directly.
2769  * Moreover, it should not come from a DMA buffer and is not readily
2770  * reclaimable. So those GFP bits should be masked off.
2771  */
2772 #define OBJCGS_CLEAR_MASK       (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
2773
2774 /*
2775  * mod_objcg_mlstate() may be called with irq enabled, so
2776  * mod_memcg_lruvec_state() should be used.
2777  */
2778 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
2779                                      struct pglist_data *pgdat,
2780                                      enum node_stat_item idx, int nr)
2781 {
2782         struct mem_cgroup *memcg;
2783         struct lruvec *lruvec;
2784
2785         rcu_read_lock();
2786         memcg = obj_cgroup_memcg(objcg);
2787         lruvec = mem_cgroup_lruvec(memcg, pgdat);
2788         mod_memcg_lruvec_state(lruvec, idx, nr);
2789         rcu_read_unlock();
2790 }
2791
2792 int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
2793                                  gfp_t gfp, bool new_slab)
2794 {
2795         unsigned int objects = objs_per_slab(s, slab);
2796         unsigned long memcg_data;
2797         void *vec;
2798
2799         gfp &= ~OBJCGS_CLEAR_MASK;
2800         vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2801                            slab_nid(slab));
2802         if (!vec)
2803                 return -ENOMEM;
2804
2805         memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
2806         if (new_slab) {
2807                 /*
2808                  * If the slab is brand new and nobody can yet access its
2809                  * memcg_data, no synchronization is required and memcg_data can
2810                  * be simply assigned.
2811                  */
2812                 slab->memcg_data = memcg_data;
2813         } else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) {
2814                 /*
2815                  * If the slab is already in use, somebody can allocate and
2816                  * assign obj_cgroups in parallel. In this case the existing
2817                  * objcg vector should be reused.
2818                  */
2819                 kfree(vec);
2820                 return 0;
2821         }
2822
2823         kmemleak_not_leak(vec);
2824         return 0;
2825 }
2826
2827 /*
2828  * Returns a pointer to the memory cgroup to which the kernel object is charged.
2829  *
2830  * A passed kernel object can be a slab object or a generic kernel page, so
2831  * different mechanisms for getting the memory cgroup pointer should be used.
2832  * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
2833  * cannot know for sure how the kernel object is implemented.
2834  * mem_cgroup_from_obj() can be safely used in such cases.
2835  *
2836  * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2837  * cgroup_mutex, etc.
2838  */
2839 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2840 {
2841         struct folio *folio;
2842
2843         if (mem_cgroup_disabled())
2844                 return NULL;
2845
2846         folio = virt_to_folio(p);
2847
2848         /*
2849          * Slab objects are accounted individually, not per-page.
2850          * Memcg membership data for each individual object is saved in
2851          * slab->memcg_data.
2852          */
2853         if (folio_test_slab(folio)) {
2854                 struct obj_cgroup **objcgs;
2855                 struct slab *slab;
2856                 unsigned int off;
2857
2858                 slab = folio_slab(folio);
2859                 objcgs = slab_objcgs(slab);
2860                 if (!objcgs)
2861                         return NULL;
2862
2863                 off = obj_to_index(slab->slab_cache, slab, p);
2864                 if (objcgs[off])
2865                         return obj_cgroup_memcg(objcgs[off]);
2866
2867                 return NULL;
2868         }
2869
2870         /*
2871          * page_memcg_check() is used here, because in theory we can encounter
2872          * a folio where the slab flag has been cleared already, but
2873  * slab->memcg_data has not been freed yet.
2874  * page_memcg_check(page) will guarantee that a proper memory
2875          * cgroup pointer or NULL will be returned.
2876          */
2877         return page_memcg_check(folio_page(folio, 0));
2878 }
2879
2880 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
2881 {
2882         struct obj_cgroup *objcg = NULL;
2883         struct mem_cgroup *memcg;
2884
2885         if (memcg_kmem_bypass())
2886                 return NULL;
2887
2888         rcu_read_lock();
2889         if (unlikely(active_memcg()))
2890                 memcg = active_memcg();
2891         else
2892                 memcg = mem_cgroup_from_task(current);
2893
2894         for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
2895                 objcg = rcu_dereference(memcg->objcg);
2896                 if (objcg && obj_cgroup_tryget(objcg))
2897                         break;
2898                 objcg = NULL;
2899         }
2900         rcu_read_unlock();
2901
2902         return objcg;
2903 }
2904
2905 static void memcg_account_kmem(struct mem_cgroup *memcg, int nr_pages)
2906 {
2907         mod_memcg_state(memcg, MEMCG_KMEM, nr_pages);
2908         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
2909                 if (nr_pages > 0)
2910                         page_counter_charge(&memcg->kmem, nr_pages);
2911                 else
2912                         page_counter_uncharge(&memcg->kmem, -nr_pages);
2913         }
2914 }
2915
2916
2917 /*
2918  * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
2919  * @objcg: object cgroup to uncharge
2920  * @nr_pages: number of pages to uncharge
2921  */
2922 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2923                                       unsigned int nr_pages)
2924 {
2925         struct mem_cgroup *memcg;
2926
2927         memcg = get_mem_cgroup_from_objcg(objcg);
2928
2929         memcg_account_kmem(memcg, -nr_pages);
2930         refill_stock(memcg, nr_pages);
2931
2932         css_put(&memcg->css);
2933 }
2934
2935 /*
2936  * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
2937  * @objcg: object cgroup to charge
2938  * @gfp: reclaim mode
2939  * @nr_pages: number of pages to charge
2940  *
2941  * Returns 0 on success, an error code on failure.
2942  */
2943 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
2944                                    unsigned int nr_pages)
2945 {
2946         struct mem_cgroup *memcg;
2947         int ret;
2948
2949         memcg = get_mem_cgroup_from_objcg(objcg);
2950
2951         ret = try_charge_memcg(memcg, gfp, nr_pages);
2952         if (ret)
2953                 goto out;
2954
2955         memcg_account_kmem(memcg, nr_pages);
2956 out:
2957         css_put(&memcg->css);
2958
2959         return ret;
2960 }
2961
2962 /**
2963  * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
2964  * @page: page to charge
2965  * @gfp: reclaim mode
2966  * @order: allocation order
2967  *
2968  * Returns 0 on success, an error code on failure.
2969  */
2970 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
2971 {
2972         struct obj_cgroup *objcg;
2973         int ret = 0;
2974
2975         objcg = get_obj_cgroup_from_current();
2976         if (objcg) {
2977                 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
2978                 if (!ret) {
2979                         page->memcg_data = (unsigned long)objcg |
2980                                 MEMCG_DATA_KMEM;
2981                         return 0;
2982                 }
2983                 obj_cgroup_put(objcg);
2984         }
2985         return ret;
2986 }
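/*
 * This is the backend for __GFP_ACCOUNT page allocations, e.g.
 * alloc_pages(GFP_KERNEL_ACCOUNT, order). The page is tagged
 * MEMCG_DATA_KMEM so that __memcg_kmem_uncharge_page() below can find
 * the objcg again at free time.
 */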
2987
2988 /**
2989  * __memcg_kmem_uncharge_page: uncharge a kmem page
2990  * @page: page to uncharge
2991  * @order: allocation order
2992  */
2993 void __memcg_kmem_uncharge_page(struct page *page, int order)
2994 {
2995         struct folio *folio = page_folio(page);
2996         struct obj_cgroup *objcg;
2997         unsigned int nr_pages = 1 << order;
2998
2999         if (!folio_memcg_kmem(folio))
3000                 return;
3001
3002         objcg = __folio_objcg(folio);
3003         obj_cgroup_uncharge_pages(objcg, nr_pages);
3004         folio->memcg_data = 0;
3005         obj_cgroup_put(objcg);
3006 }
3007
3008 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3009                      enum node_stat_item idx, int nr)
3010 {
3011         struct memcg_stock_pcp *stock;
3012         struct obj_cgroup *old = NULL;
3013         unsigned long flags;
3014         int *bytes;
3015
3016         local_lock_irqsave(&memcg_stock.stock_lock, flags);
3017         stock = this_cpu_ptr(&memcg_stock);
3018
3019         /*
3020          * Save vmstat data in stock and skip vmstat array update unless
3021          * accumulating over a page of vmstat data or when pgdat or idx
3022          * changes.
3023          */
3024         if (stock->cached_objcg != objcg) {
3025                 old = drain_obj_stock(stock);
3026                 obj_cgroup_get(objcg);
3027                 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3028                                 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3029                 stock->cached_objcg = objcg;
3030                 stock->cached_pgdat = pgdat;
3031         } else if (stock->cached_pgdat != pgdat) {
3032                 /* Flush the existing cached vmstat data */
3033                 struct pglist_data *oldpg = stock->cached_pgdat;
3034
3035                 if (stock->nr_slab_reclaimable_b) {
3036                         mod_objcg_mlstate(objcg, oldpg, NR_SLAB_RECLAIMABLE_B,
3037                                           stock->nr_slab_reclaimable_b);
3038                         stock->nr_slab_reclaimable_b = 0;
3039                 }
3040                 if (stock->nr_slab_unreclaimable_b) {
3041                         mod_objcg_mlstate(objcg, oldpg, NR_SLAB_UNRECLAIMABLE_B,
3042                                           stock->nr_slab_unreclaimable_b);
3043                         stock->nr_slab_unreclaimable_b = 0;
3044                 }
3045                 stock->cached_pgdat = pgdat;
3046         }
3047
3048         bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3049                                                : &stock->nr_slab_unreclaimable_b;
3050         /*
3051          * Even for a large object >= PAGE_SIZE, the vmstat data will still
3052          * be cached locally at least once before being pushed out.
3053          */
3054         if (!*bytes) {
3055                 *bytes = nr;
3056                 nr = 0;
3057         } else {
3058                 *bytes += nr;
3059                 if (abs(*bytes) > PAGE_SIZE) {
3060                         nr = *bytes;
3061                         *bytes = 0;
3062                 } else {
3063                         nr = 0;
3064                 }
3065         }
3066         if (nr)
3067                 mod_objcg_mlstate(objcg, pgdat, idx, nr);
3068
3069         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3070         if (old)
3071                 obj_cgroup_put(old);
3072 }
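
/*
 * Illustrative note on the batching above: repeated small updates for the
 * same objcg/pgdat/idx only accumulate in the per-cpu byte counter and
 * reach mod_objcg_mlstate() once more than PAGE_SIZE bytes (positive or
 * negative) have built up, or when the cached objcg or pgdat changes and
 * the old data is flushed.
 */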
3073
3074 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3075 {
3076         struct memcg_stock_pcp *stock;
3077         unsigned long flags;
3078         bool ret = false;
3079
3080         local_lock_irqsave(&memcg_stock.stock_lock, flags);
3081
3082         stock = this_cpu_ptr(&memcg_stock);
3083         if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3084                 stock->nr_bytes -= nr_bytes;
3085                 ret = true;
3086         }
3087
3088         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3089
3090         return ret;
3091 }
3092
3093 static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock)
3094 {
3095         struct obj_cgroup *old = stock->cached_objcg;
3096
3097         if (!old)
3098                 return NULL;
3099
3100         if (stock->nr_bytes) {
3101                 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3102                 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3103
3104                 if (nr_pages) {
3105                         struct mem_cgroup *memcg;
3106
3107                         memcg = get_mem_cgroup_from_objcg(old);
3108
3109                         memcg_account_kmem(memcg, -nr_pages);
3110                         __refill_stock(memcg, nr_pages);
3111
3112                         css_put(&memcg->css);
3113                 }
3114
3115                 /*
3116                  * The leftover is flushed to the centralized per-memcg value.
3117                  * On the next attempt to refill obj stock it will be moved
3118          * to a per-cpu stock (probably on another CPU), see
3119                  * refill_obj_stock().
3120                  *
3121                  * How often it's flushed is a trade-off between the memory
3122                  * limit enforcement accuracy and potential CPU contention,
3123                  * so it might be changed in the future.
3124                  */
3125                 atomic_add(nr_bytes, &old->nr_charged_bytes);
3126                 stock->nr_bytes = 0;
3127         }
3128
3129         /*
3130          * Flush the vmstat data in the current stock.
3131          */
3132         if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3133                 if (stock->nr_slab_reclaimable_b) {
3134                         mod_objcg_mlstate(old, stock->cached_pgdat,
3135                                           NR_SLAB_RECLAIMABLE_B,
3136                                           stock->nr_slab_reclaimable_b);
3137                         stock->nr_slab_reclaimable_b = 0;
3138                 }
3139                 if (stock->nr_slab_unreclaimable_b) {
3140                         mod_objcg_mlstate(old, stock->cached_pgdat,
3141                                           NR_SLAB_UNRECLAIMABLE_B,
3142                                           stock->nr_slab_unreclaimable_b);
3143                         stock->nr_slab_unreclaimable_b = 0;
3144                 }
3145                 stock->cached_pgdat = NULL;
3146         }
3147
3148         stock->cached_objcg = NULL;
3149         /*
3150          * The `old' objcg needs to be released by the caller via
3151          * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock.
3152          */
3153         return old;
3154 }
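
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): draining a
 * stock with nr_bytes == 9000 returns two whole pages (8192 bytes) via
 * __refill_stock() and flushes the 808-byte remainder to
 * old->nr_charged_bytes.
 */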
3155
3156 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3157                                      struct mem_cgroup *root_memcg)
3158 {
3159         struct mem_cgroup *memcg;
3160
3161         if (stock->cached_objcg) {
3162                 memcg = obj_cgroup_memcg(stock->cached_objcg);
3163                 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3164                         return true;
3165         }
3166
3167         return false;
3168 }
3169
3170 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3171                              bool allow_uncharge)
3172 {
3173         struct memcg_stock_pcp *stock;
3174         struct obj_cgroup *old = NULL;
3175         unsigned long flags;
3176         unsigned int nr_pages = 0;
3177
3178         local_lock_irqsave(&memcg_stock.stock_lock, flags);
3179
3180         stock = this_cpu_ptr(&memcg_stock);
3181         if (stock->cached_objcg != objcg) { /* reset if necessary */
3182                 old = drain_obj_stock(stock);
3183                 obj_cgroup_get(objcg);
3184                 stock->cached_objcg = objcg;
3185                 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3186                                 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3187                 allow_uncharge = true;  /* Allow uncharge when objcg changes */
3188         }
3189         stock->nr_bytes += nr_bytes;
3190
3191         if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3192                 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3193                 stock->nr_bytes &= (PAGE_SIZE - 1);
3194         }
3195
3196         local_unlock_irqrestore(&memcg_stock.stock_lock, flags);
3197         if (old)
3198                 obj_cgroup_put(old);
3199
3200         if (nr_pages)
3201                 obj_cgroup_uncharge_pages(objcg, nr_pages);
3202 }
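
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): refilling
 * 200 bytes into a stock already holding 3996 bytes brings nr_bytes to
 * 4196; with allow_uncharge == true, one whole page is uncharged and
 * 100 bytes stay cached.
 */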
3203
3204 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3205 {
3206         unsigned int nr_pages, nr_bytes;
3207         int ret;
3208
3209         if (consume_obj_stock(objcg, size))
3210                 return 0;
3211
3212         /*
3213          * In theory, objcg->nr_charged_bytes can have enough
3214          * pre-charged bytes to satisfy the allocation. However,
3215          * flushing objcg->nr_charged_bytes requires two atomic
3216          * operations, and objcg->nr_charged_bytes can't be big.
3217          * The shared objcg->nr_charged_bytes can also become a
3218          * performance bottleneck if all tasks of the same memcg are
3219          * trying to update it. So it's better to ignore it and try to
3220          * grab some new pages. The stock's nr_bytes will be flushed to
3221          * objcg->nr_charged_bytes later on when objcg changes.
3222          *
3223          * The stock's nr_bytes may contain enough pre-charged bytes
3224          * to allow one fewer page to be charged, but we can't rely
3225          * on the pre-charged bytes not being changed outside of
3226          * consume_obj_stock() or refill_obj_stock(). So ignore those
3227          * pre-charged bytes as well when charging pages. To avoid a
3228          * page uncharge right after a page charge, we set the
3229          * allow_uncharge flag to false when calling refill_obj_stock()
3230          * to temporarily allow the pre-charged bytes to exceed the page
3231          * size limit. The maximum reachable value of the pre-charged
3232          * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data
3233          * race.
3234          */
3235         nr_pages = size >> PAGE_SHIFT;
3236         nr_bytes = size & (PAGE_SIZE - 1);
3237
3238         if (nr_bytes)
3239                 nr_pages += 1;
3240
3241         ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3242         if (!ret && nr_bytes)
3243                 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3244
3245         return ret;
3246 }
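
/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): charging a
 * 100-byte object that misses the per-cpu stock charges one full page,
 * then returns the unused 4096 - 100 = 3996 bytes to the stock with
 * allow_uncharge == false, so the page isn't uncharged right back.
 */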
3247
3248 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3249 {
3250         refill_obj_stock(objcg, size, true);
3251 }
3252
3253 #endif /* CONFIG_MEMCG_KMEM */
3254
3255 /*
3256  * Because page_memcg(head) is not set on tails, set it now.
3257  */
3258 void split_page_memcg(struct page *head, unsigned int nr)
3259 {
3260         struct folio *folio = page_folio(head);
3261         struct mem_cgroup *memcg = folio_memcg(folio);
3262         int i;
3263
3264         if (mem_cgroup_disabled() || !memcg)
3265                 return;
3266
3267         for (i = 1; i < nr; i++)
3268                 folio_page(folio, i)->memcg_data = folio->memcg_data;
3269
3270         if (folio_memcg_kmem(folio))
3271                 obj_cgroup_get_many(__folio_objcg(folio), nr - 1);
3272         else
3273                 css_get_many(&memcg->css, nr - 1);
3274 }
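
/*
 * Example (illustrative): splitting an order-9 compound page (nr == 512)
 * copies head->memcg_data to the 511 tail pages and takes 511 extra
 * objcg or css references, so every resulting page carries its own
 * reference and can be uncharged independently.
 */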
3275
3276 #ifdef CONFIG_MEMCG_SWAP
3277 /**
3278  * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3279  * @entry: swap entry to be moved
3280  * @from:  mem_cgroup which the entry is moved from
3281  * @to:  mem_cgroup which the entry is moved to
3282  *
3283  * It succeeds only when the swap_cgroup's record for this entry is the same
3284  * as the mem_cgroup's id of @from.
3285  *
3286  * Returns 0 on success, -EINVAL on failure.
3287  *
3288  * The caller must have charged to @to, IOW, called page_counter_charge() for
3289  * both res and memsw, and called css_get().
3290  */
3291 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3292                                 struct mem_cgroup *from, struct mem_cgroup *to)
3293 {
3294         unsigned short old_id, new_id;
3295
3296         old_id = mem_cgroup_id(from);
3297         new_id = mem_cgroup_id(to);
3298
3299         if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3300                 mod_memcg_state(from, MEMCG_SWAP, -1);
3301                 mod_memcg_state(to, MEMCG_SWAP, 1);
3302                 return 0;
3303         }
3304         return -EINVAL;
3305 }
3306 #else
3307 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3308                                 struct mem_cgroup *from, struct mem_cgroup *to)
3309 {
3310         return -EINVAL;
3311 }
3312 #endif
3313
3314 static DEFINE_MUTEX(memcg_max_mutex);
3315
3316 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3317                                  unsigned long max, bool memsw)
3318 {
3319         bool enlarge = false;
3320         bool drained = false;
3321         int ret;
3322         bool limits_invariant;
3323         struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3324
3325         do {
3326                 if (signal_pending(current)) {
3327                         ret = -EINTR;
3328                         break;
3329                 }
3330
3331                 mutex_lock(&memcg_max_mutex);
3332                 /*
3333                  * Make sure that the new limit (memsw or memory limit) doesn't
3334                  * break our basic invariant: memory.max <= memsw.max.
3335                  */
3336                 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3337                                            max <= memcg->memsw.max;
3338                 if (!limits_invariant) {
3339                         mutex_unlock(&memcg_max_mutex);
3340                         ret = -EINVAL;
3341                         break;
3342                 }
3343                 if (max > counter->max)
3344                         enlarge = true;
3345                 ret = page_counter_set_max(counter, max);
3346                 mutex_unlock(&memcg_max_mutex);
3347
3348                 if (!ret)
3349                         break;
3350
3351                 if (!drained) {
3352                         drain_all_stock(memcg);
3353                         drained = true;
3354                         continue;
3355                 }
3356
3357                 if (!try_to_free_mem_cgroup_pages(memcg, 1,
3358                                         GFP_KERNEL, !memsw)) {
3359                         ret = -EBUSY;
3360                         break;
3361                 }
3362         } while (true);
3363
3364         if (!ret && enlarge)
3365                 memcg_oom_recover(memcg);
3366
3367         return ret;
3368 }
3369
3370 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3371                                             gfp_t gfp_mask,
3372                                             unsigned long *total_scanned)
3373 {
3374         unsigned long nr_reclaimed = 0;
3375         struct mem_cgroup_per_node *mz, *next_mz = NULL;
3376         unsigned long reclaimed;
3377         int loop = 0;
3378         struct mem_cgroup_tree_per_node *mctz;
3379         unsigned long excess;
3380         unsigned long nr_scanned;
3381
3382         if (order > 0)
3383                 return 0;
3384
3385         mctz = soft_limit_tree.rb_tree_per_node[pgdat->node_id];
3386
3387         /*
3388          * Do not even bother to check the largest node if the root
3389          * is empty. Do it lockless to prevent lock bouncing. Races
3390          * are acceptable as soft limit is best effort anyway.
3391          */
3392         if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3393                 return 0;
3394
3395         /*
3396          * This loop can run for a while, especially if mem_cgroups
3397          * continuously keep exceeding their soft limit and putting the
3398          * system under pressure.
3399          */
3400         do {
3401                 if (next_mz)
3402                         mz = next_mz;
3403                 else
3404                         mz = mem_cgroup_largest_soft_limit_node(mctz);
3405                 if (!mz)
3406                         break;
3407
3408                 nr_scanned = 0;
3409                 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3410                                                     gfp_mask, &nr_scanned);
3411                 nr_reclaimed += reclaimed;
3412                 *total_scanned += nr_scanned;
3413                 spin_lock_irq(&mctz->lock);
3414                 __mem_cgroup_remove_exceeded(mz, mctz);
3415
3416                 /*
3417                  * If we failed to reclaim anything from this memory cgroup
3418                  * it is time to move on to the next cgroup
3419                  */
3420                 next_mz = NULL;
3421                 if (!reclaimed)
3422                         next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3423
3424                 excess = soft_limit_excess(mz->memcg);
3425                 /*
3426                  * One school of thought says that we should not add
3427                  * back the node to the tree if reclaim returns 0.
3428                  * But our reclaim could return 0 simply because, due
3429                  * to priority, we are exposing a smaller subset of
3430                  * memory to reclaim from. Consider this as a longer
3431                  * term TODO.
3432                  */
3433                 /* If excess == 0, no tree ops */
3434                 __mem_cgroup_insert_exceeded(mz, mctz, excess);
3435                 spin_unlock_irq(&mctz->lock);
3436                 css_put(&mz->memcg->css);
3437                 loop++;
3438                 /*
3439                  * Could not reclaim anything and there are no more
3440                  * mem cgroups to try or we seem to be looping without
3441                  * reclaiming anything.
3442                  */
3443                 if (!nr_reclaimed &&
3444                         (next_mz == NULL ||
3445                         loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3446                         break;
3447         } while (!nr_reclaimed);
3448         if (next_mz)
3449                 css_put(&next_mz->memcg->css);
3450         return nr_reclaimed;
3451 }
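
/*
 * Illustrative usage (cgroup v1): the soft limit driving this reclaim
 * pass is configured from userspace, e.g.
 *
 *	echo 512M > /sys/fs/cgroup/memory/<group>/memory.soft_limit_in_bytes
 *
 * and is only enforced here, when the node is under memory pressure.
 */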
3452
3453 /*
3454  * Reclaims as many pages from the given memcg as possible.
3455  *
3456  * Caller is responsible for holding css reference for memcg.
3457  */
3458 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3459 {
3460         int nr_retries = MAX_RECLAIM_RETRIES;
3461
3462         /* we call try-to-free pages to make this cgroup empty */
3463         lru_add_drain_all();
3464
3465         drain_all_stock(memcg);
3466
3467         /* try to free all pages in this cgroup */
3468         while (nr_retries && page_counter_read(&memcg->memory)) {
3469                 if (signal_pending(current))
3470                         return -EINTR;
3471
3472                 if (!try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true))
3473                         nr_retries--;
3474         }
3475
3476         return 0;
3477 }
3478
3479 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3480                                             char *buf, size_t nbytes,
3481                                             loff_t off)
3482 {
3483         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3484
3485         if (mem_cgroup_is_root(memcg))
3486                 return -EINVAL;
3487         return mem_cgroup_force_empty(memcg) ?: nbytes;
3488 }
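
/*
 * Illustrative usage (cgroup v1): any write triggers the reclaim above,
 * e.g. "echo 0 > memory.force_empty" before removing an idle cgroup.
 */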
3489
3490 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3491                                      struct cftype *cft)
3492 {
3493         return 1;
3494 }
3495
3496 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3497                                       struct cftype *cft, u64 val)
3498 {
3499         if (val == 1)
3500                 return 0;
3501
3502         pr_warn_once("Non-hierarchical mode is deprecated. "
3503                      "Please report your usecase to linux-mm@kvack.org if you "
3504                      "depend on this functionality.\n");
3505
3506         return -EINVAL;
3507 }
3508
3509 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3510 {
3511         unsigned long val;
3512
3513         if (mem_cgroup_is_root(memcg)) {
3514                 mem_cgroup_flush_stats();
3515                 val = memcg_page_state(memcg, NR_FILE_PAGES) +
3516                         memcg_page_state(memcg, NR_ANON_MAPPED);
3517                 if (swap)
3518                         val += memcg_page_state(memcg, MEMCG_SWAP);
3519         } else {
3520                 if (!swap)
3521                         val = page_counter_read(&memcg->memory);
3522                 else
3523                         val = page_counter_read(&memcg->memsw);
3524         }
3525         return val;
3526 }
3527
3528 enum {
3529         RES_USAGE,
3530         RES_LIMIT,
3531         RES_MAX_USAGE,
3532         RES_FAILCNT,
3533         RES_SOFT_LIMIT,
3534 };
3535
3536 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3537                                struct cftype *cft)
3538 {
3539         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3540         struct page_counter *counter;
3541
3542         switch (MEMFILE_TYPE(cft->private)) {
3543         case _MEM:
3544                 counter = &memcg->memory;
3545                 break;
3546         case _MEMSWAP:
3547                 counter = &memcg->memsw;
3548                 break;
3549         case _KMEM:
3550                 counter = &memcg->kmem;
3551                 break;
3552         case _TCP:
3553                 counter = &memcg->tcpmem;
3554                 break;
3555         default:
3556                 BUG();
3557         }
3558
3559         switch (MEMFILE_ATTR(cft->private)) {
3560         case RES_USAGE:
3561                 if (counter == &memcg->memory)
3562                         return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3563                 if (counter == &memcg->memsw)
3564                         return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3565                 return (u64)page_counter_read(counter) * PAGE_SIZE;
3566         case RES_LIMIT:
3567                 return (u64)counter->max * PAGE_SIZE;
3568         case RES_MAX_USAGE:
3569                 return (u64)counter->watermark * PAGE_SIZE;
3570         case RES_FAILCNT:
3571                 return counter->failcnt;
3572         case RES_SOFT_LIMIT:
3573                 return (u64)memcg->soft_limit * PAGE_SIZE;
3574         default:
3575                 BUG();
3576         }
3577 }
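
/*
 * Example (illustrative): a read of memory.usage_in_bytes arrives here
 * with cft->private encoding _MEM and RES_USAGE and resolves to
 * mem_cgroup_usage(memcg, false) * PAGE_SIZE; memory.memsw.usage_in_bytes
 * is the same with swap included.
 */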
3578
3579 #ifdef CONFIG_MEMCG_KMEM
3580 static int memcg_online_kmem(struct mem_cgroup *memcg)
3581 {
3582         struct obj_cgroup *objcg;
3583
3584         if (cgroup_memory_nokmem)
3585                 return 0;
3586
3587         if (unlikely(mem_cgroup_is_root(memcg)))
3588                 return 0;
3589
3590         objcg = obj_cgroup_alloc();
3591         if (!objcg)
3592                 return -ENOMEM;
3593
3594         objcg->memcg = memcg;
3595         rcu_assign_pointer(memcg->objcg, objcg);
3596
3597         static_branch_enable(&memcg_kmem_enabled_key);
3598
3599         memcg->kmemcg_id = memcg->id.id;
3600
3601         return 0;
3602 }
3603
3604 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3605 {
3606         struct mem_cgroup *parent;
3607
3608         if (cgroup_memory_nokmem)
3609                 return;
3610
3611         if (unlikely(mem_cgroup_is_root(memcg)))
3612                 return;
3613
3614         parent = parent_mem_cgroup(memcg);
3615         if (!parent)
3616                 parent = root_mem_cgroup;
3617
3618         memcg_reparent_objcgs(memcg, parent);
3619
3620         /*
3621          * After we have finished memcg_reparent_objcgs(), all list_lrus
3622          * corresponding to this cgroup are guaranteed to remain empty.
3623          * The ordering is imposed by list_lru_node->lock taken by
3624          * memcg_reparent_list_lrus().
3625          */
3626         memcg_reparent_list_lrus(memcg, parent);
3627 }
3628 #else
3629 static int memcg_online_kmem(struct mem_cgroup *memcg)
3630 {
3631         return 0;
3632 }
3633 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3634 {
3635 }
3636 #endif /* CONFIG_MEMCG_KMEM */
3637
3638 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3639 {
3640         int ret;
3641
3642         mutex_lock(&memcg_max_mutex);
3643
3644         ret = page_counter_set_max(&memcg->tcpmem, max);
3645         if (ret)
3646                 goto out;
3647
3648         if (!memcg->tcpmem_active) {
3649                 /*
3650                  * The active flag needs to be written after the static_key
3651                  * update. This is what guarantees that the socket activation
3652                  * function is the last one to run. See mem_cgroup_sk_alloc()
3653                  * for details, and note that we don't mark any socket as
3654                  * belonging to this memcg until that flag is up.
3655                  *
3656                  * We need to do this, because static_keys will span multiple
3657                  * sites, but we can't control their order. If we mark a socket
3658                  * as accounted, but the accounting functions are not patched in
3659                  * yet, we'll lose accounting.
3660                  *
3661                  * We never race with the readers in mem_cgroup_sk_alloc(),
3662          * because when this value changes, the code to process it is not
3663                  * patched in yet.
3664                  */
3665                 static_branch_inc(&memcg_sockets_enabled_key);
3666                 memcg->tcpmem_active = true;
3667         }
3668 out:
3669         mutex_unlock(&memcg_max_mutex);
3670         return ret;
3671 }
3672
3673 /*
3674  * The user of this function is the write handler for
3675  * RES_LIMIT (and RES_SOFT_LIMIT).
3676  */
3677 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3678                                 char *buf, size_t nbytes, loff_t off)
3679 {
3680         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3681         unsigned long nr_pages;
3682         int ret;
3683
3684         buf = strstrip(buf);
3685         ret = page_counter_memparse(buf, "-1", &nr_pages);
3686         if (ret)
3687                 return ret;
3688
3689         switch (MEMFILE_ATTR(of_cft(of)->private)) {
3690         case RES_LIMIT:
3691                 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3692                         ret = -EINVAL;
3693                         break;
3694                 }
3695                 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3696                 case _MEM:
3697                         ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3698                         break;
3699                 case _MEMSWAP:
3700                         ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3701                         break;
3702                 case _KMEM:
3703                         /* kmem.limit_in_bytes is deprecated. */
3704                         ret = -EOPNOTSUPP;
3705                         break;
3706                 case _TCP:
3707                         ret = memcg_update_tcp_max(memcg, nr_pages);
3708                         break;
3709                 }
3710                 break;
3711         case RES_SOFT_LIMIT:
3712                 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
3713                         ret = -EOPNOTSUPP;
3714                 } else {
3715                         memcg->soft_limit = nr_pages;
3716                         ret = 0;
3717                 }
3718                 break;
3719         }
3720         return ret ?: nbytes;
3721 }
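
/*
 * Illustrative usage (cgroup v1): "echo 1G > memory.limit_in_bytes" lands
 * here as RES_LIMIT/_MEM; page_counter_memparse() turns the string into
 * nr_pages (with "-1" meaning unlimited) before mem_cgroup_resize_max()
 * is called.
 */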
3722
3723 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3724                                 size_t nbytes, loff_t off)
3725 {
3726         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3727         struct page_counter *counter;
3728
3729         switch (MEMFILE_TYPE(of_cft(of)->private)) {
3730         case _MEM:
3731                 counter = &memcg->memory;
3732                 break;
3733         case _MEMSWAP:
3734                 counter = &memcg->memsw;
3735                 break;
3736         case _KMEM:
3737                 counter = &memcg->kmem;
3738                 break;
3739         case _TCP:
3740                 counter = &memcg->tcpmem;
3741                 break;
3742         default:
3743                 BUG();
3744         }
3745
3746         switch (MEMFILE_ATTR(of_cft(of)->private)) {
3747         case RES_MAX_USAGE:
3748                 page_counter_reset_watermark(counter);
3749                 break;
3750         case RES_FAILCNT:
3751                 counter->failcnt = 0;
3752                 break;
3753         default:
3754                 BUG();
3755         }
3756
3757         return nbytes;
3758 }
3759
3760 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3761                                         struct cftype *cft)
3762 {
3763         return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3764 }
3765
3766 #ifdef CONFIG_MMU
3767 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3768                                         struct cftype *cft, u64 val)
3769 {
3770         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3771
3772         if (val & ~MOVE_MASK)
3773                 return -EINVAL;
3774
3775         /*
3776          * No kind of locking is needed in here, because ->can_attach() will
3777          * check this value once in the beginning of the process, and then carry
3778          * on with stale data. This means that changes to this value will only
3779          * affect task migrations starting after the change.
3780          */
3781         memcg->move_charge_at_immigrate = val;
3782         return 0;
3783 }
3784 #else
3785 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3786                                         struct cftype *cft, u64 val)
3787 {
3788         return -ENOSYS;
3789 }
3790 #endif
3791
3792 #ifdef CONFIG_NUMA
3793
3794 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3795 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3796 #define LRU_ALL      ((1 << NR_LRU_LISTS) - 1)
3797
3798 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3799                                 int nid, unsigned int lru_mask, bool tree)
3800 {
3801         struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3802         unsigned long nr = 0;
3803         enum lru_list lru;
3804
3805         VM_BUG_ON((unsigned)nid >= nr_node_ids);
3806
3807         for_each_lru(lru) {
3808                 if (!(BIT(lru) & lru_mask))
3809                         continue;
3810                 if (tree)
3811                         nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3812                 else
3813                         nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3814         }
3815         return nr;
3816 }
3817
3818 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3819                                              unsigned int lru_mask,
3820                                              bool tree)
3821 {
3822         unsigned long nr = 0;
3823         enum lru_list lru;
3824
3825         for_each_lru(lru) {
3826                 if (!(BIT(lru) & lru_mask))
3827                         continue;
3828                 if (tree)
3829                         nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3830                 else
3831                         nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3832         }
3833         return nr;
3834 }
3835
3836 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3837 {
3838         struct numa_stat {
3839                 const char *name;
3840                 unsigned int lru_mask;
3841         };
3842
3843         static const struct numa_stat stats[] = {
3844                 { "total", LRU_ALL },
3845                 { "file", LRU_ALL_FILE },
3846                 { "anon", LRU_ALL_ANON },
3847                 { "unevictable", BIT(LRU_UNEVICTABLE) },
3848         };
3849         const struct numa_stat *stat;
3850         int nid;
3851         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3852
3853         mem_cgroup_flush_stats();
3854
3855         for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3856                 seq_printf(m, "%s=%lu", stat->name,
3857                            mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3858                                                    false));
3859                 for_each_node_state(nid, N_MEMORY)
3860                         seq_printf(m, " N%d=%lu", nid,
3861                                    mem_cgroup_node_nr_lru_pages(memcg, nid,
3862                                                         stat->lru_mask, false));
3863                 seq_putc(m, '\n');
3864         }
3865
3866         for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3867
3868                 seq_printf(m, "hierarchical_%s=%lu", stat->name,
3869                            mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3870                                                    true));
3871                 for_each_node_state(nid, N_MEMORY)
3872                         seq_printf(m, " N%d=%lu", nid,
3873                                    mem_cgroup_node_nr_lru_pages(memcg, nid,
3874                                                         stat->lru_mask, true));
3875                 seq_putc(m, '\n');
3876         }
3877
3878         return 0;
3879 }
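
/*
 * Illustrative output (hypothetical numbers): memory.numa_stat reads as
 *
 *	total=200 N0=150 N1=50
 *	file=120 N0=100 N1=20
 *	...
 *	hierarchical_total=500 N0=400 N1=100
 */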
3880 #endif /* CONFIG_NUMA */
3881
3882 static const unsigned int memcg1_stats[] = {
3883         NR_FILE_PAGES,
3884         NR_ANON_MAPPED,
3885 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3886         NR_ANON_THPS,
3887 #endif
3888         NR_SHMEM,
3889         NR_FILE_MAPPED,
3890         NR_FILE_DIRTY,
3891         NR_WRITEBACK,
3892         MEMCG_SWAP,
3893 };
3894
3895 static const char *const memcg1_stat_names[] = {
3896         "cache",
3897         "rss",
3898 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3899         "rss_huge",
3900 #endif
3901         "shmem",
3902         "mapped_file",
3903         "dirty",
3904         "writeback",
3905         "swap",
3906 };
3907
3908 /* Universal VM events that cgroup1 shows, in the original sort order */
3909 static const unsigned int memcg1_events[] = {
3910         PGPGIN,
3911         PGPGOUT,
3912         PGFAULT,
3913         PGMAJFAULT,
3914 };
3915
3916 static int memcg_stat_show(struct seq_file *m, void *v)
3917 {
3918         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3919         unsigned long memory, memsw;
3920         struct mem_cgroup *mi;
3921         unsigned int i;
3922
3923         BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
3924
3925         mem_cgroup_flush_stats();
3926
3927         for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3928                 unsigned long nr;
3929
3930                 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3931                         continue;
3932                 nr = memcg_page_state_local(memcg, memcg1_stats[i]);
3933                 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
3934         }
3935
3936         for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3937                 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
3938                            memcg_events_local(memcg, memcg1_events[i]));
3939
3940         for (i = 0; i < NR_LRU_LISTS; i++)
3941                 seq_printf(m, "%s %lu\n", lru_list_name(i),
3942                            memcg_page_state_local(memcg, NR_LRU_BASE + i) *
3943                            PAGE_SIZE);
3944
3945         /* Hierarchical information */
3946         memory = memsw = PAGE_COUNTER_MAX;
3947         for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3948                 memory = min(memory, READ_ONCE(mi->memory.max));
3949                 memsw = min(memsw, READ_ONCE(mi->memsw.max));
3950         }
3951         seq_printf(m, "hierarchical_memory_limit %llu\n",
3952                    (u64)memory * PAGE_SIZE);
3953         if (do_memsw_account())
3954                 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3955                            (u64)memsw * PAGE_SIZE);
3956
3957         for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
3958                 unsigned long nr;
3959
3960                 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
3961                         continue;
3962                 nr = memcg_page_state(memcg, memcg1_stats[i]);
3963                 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
3964                                                 (u64)nr * PAGE_SIZE);
3965         }
3966
3967         for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
3968                 seq_printf(m, "total_%s %llu\n",
3969                            vm_event_name(memcg1_events[i]),
3970                            (u64)memcg_events(memcg, memcg1_events[i]));
3971
3972         for (i = 0; i < NR_LRU_LISTS; i++)
3973                 seq_printf(m, "total_%s %llu\n", lru_list_name(i),
3974                            (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
3975                            PAGE_SIZE);
3976
3977 #ifdef CONFIG_DEBUG_VM
3978         {
3979                 pg_data_t *pgdat;
3980                 struct mem_cgroup_per_node *mz;
3981                 unsigned long anon_cost = 0;
3982                 unsigned long file_cost = 0;
3983
3984                 for_each_online_pgdat(pgdat) {
3985                         mz = memcg->nodeinfo[pgdat->node_id];
3986
3987                         anon_cost += mz->lruvec.anon_cost;
3988                         file_cost += mz->lruvec.file_cost;
3989                 }
3990                 seq_printf(m, "anon_cost %lu\n", anon_cost);
3991                 seq_printf(m, "file_cost %lu\n", file_cost);
3992         }
3993 #endif
3994
3995         return 0;
3996 }
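
/*
 * Illustrative output (hypothetical numbers): memory.stat starts with the
 * local counters, e.g. "cache 4096" and "rss 8192", then event counters
 * such as "pgpgin 42", the hierarchical_* limits, and finally the
 * total_* counterparts aggregated over the subtree.
 */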
3997
3998 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3999                                       struct cftype *cft)
4000 {
4001         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4002
4003         return mem_cgroup_swappiness(memcg);
4004 }
4005
4006 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4007                                        struct cftype *cft, u64 val)
4008 {
4009         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4010
4011         if (val > 200)
4012                 return -EINVAL;
4013
4014         if (!mem_cgroup_is_root(memcg))
4015                 memcg->swappiness = val;
4016         else
4017                 vm_swappiness = val;
4018
4019         return 0;
4020 }
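
/*
 * Illustrative note: "echo 60 > memory.swappiness" on the root cgroup
 * updates the global vm_swappiness; on any other cgroup it only affects
 * that memcg's reclaim. Values above 200 are rejected with -EINVAL.
 */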
4021
4022 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4023 {
4024         struct mem_cgroup_threshold_ary *t;
4025         unsigned long usage;
4026         int i;
4027
4028         rcu_read_lock();
4029         if (!swap)
4030                 t = rcu_dereference(memcg->thresholds.primary);
4031         else
4032                 t = rcu_dereference(memcg->memsw_thresholds.primary);
4033
4034         if (!t)
4035                 goto unlock;
4036
4037         usage = mem_cgroup_usage(memcg, swap);
4038
4039         /*
4040          * current_threshold points to the threshold just below or equal to
4041          * usage. If that no longer holds, a threshold was crossed after the
4042          * last call of __mem_cgroup_threshold().
4043          */
4044         i = t->current_threshold;
4045
4046         /*
4047          * Iterate backward over array of thresholds starting from
4048          * current_threshold and check if a threshold is crossed.
4049          * If none of the thresholds below usage is crossed, we read
4050          * only one element of the array here.
4051          */
4052         for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4053                 eventfd_signal(t->entries[i].eventfd, 1);
4054
4055         /* i = current_threshold + 1 */
4056         i++;
4057
4058         /*
4059          * Iterate forward over array of thresholds starting from
4060          * current_threshold+1 and check if a threshold is crossed.
4061          * If none of the thresholds above usage is crossed, we read
4062          * only one element of the array here.
4063          */
4064         for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4065                 eventfd_signal(t->entries[i].eventfd, 1);
4066
4067         /* Update current_threshold */
4068         t->current_threshold = i - 1;
4069 unlock:
4070         rcu_read_unlock();
4071 }
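
/*
 * Worked example (illustrative): with thresholds {4M, 8M, 16M} and
 * current_threshold == 1 (8M), a usage drop to 5M signals only the 8M
 * eventfd on the backward scan and leaves current_threshold at 0; a rise
 * to 20M instead signals the 16M eventfd on the forward scan and sets
 * current_threshold to 2.
 */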
4072
4073 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4074 {
4075         while (memcg) {
4076                 __mem_cgroup_threshold(memcg, false);
4077                 if (do_memsw_account())
4078                         __mem_cgroup_threshold(memcg, true);
4079
4080                 memcg = parent_mem_cgroup(memcg);
4081         }
4082 }
4083
4084 static int compare_thresholds(const void *a, const void *b)
4085 {
4086         const struct mem_cgroup_threshold *_a = a;
4087         const struct mem_cgroup_threshold *_b = b;
4088
4089         if (_a->threshold > _b->threshold)
4090                 return 1;
4091
4092         if (_a->threshold < _b->threshold)
4093                 return -1;
4094
4095         return 0;
4096 }
4097
4098 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4099 {
4100         struct mem_cgroup_eventfd_list *ev;
4101
4102         spin_lock(&memcg_oom_lock);
4103
4104         list_for_each_entry(ev, &memcg->oom_notify, list)
4105                 eventfd_signal(ev->eventfd, 1);
4106
4107         spin_unlock(&memcg_oom_lock);
4108         return 0;
4109 }
4110
4111 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4112 {
4113         struct mem_cgroup *iter;
4114
4115         for_each_mem_cgroup_tree(iter, memcg)
4116                 mem_cgroup_oom_notify_cb(iter);
4117 }
4118
4119 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4120         struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4121 {
4122         struct mem_cgroup_thresholds *thresholds;
4123         struct mem_cgroup_threshold_ary *new;
4124         unsigned long threshold;
4125         unsigned long usage;
4126         int i, size, ret;
4127
4128         ret = page_counter_memparse(args, "-1", &threshold);
4129         if (ret)
4130                 return ret;
4131
4132         mutex_lock(&memcg->thresholds_lock);
4133
4134         if (type == _MEM) {
4135                 thresholds = &memcg->thresholds;
4136                 usage = mem_cgroup_usage(memcg, false);
4137         } else if (type == _MEMSWAP) {
4138                 thresholds = &memcg->memsw_thresholds;
4139                 usage = mem_cgroup_usage(memcg, true);
4140         } else
4141                 BUG();
4142
4143         /* Check if a threshold was crossed before adding a new one */
4144         if (thresholds->primary)
4145                 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4146
4147         size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4148
4149         /* Allocate memory for new array of thresholds */
4150         new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4151         if (!new) {
4152                 ret = -ENOMEM;
4153                 goto unlock;
4154         }
4155         new->size = size;
4156
4157         /* Copy thresholds (if any) to new array */
4158         if (thresholds->primary)
4159                 memcpy(new->entries, thresholds->primary->entries,
4160                        flex_array_size(new, entries, size - 1));
4161
4162         /* Add new threshold */
4163         new->entries[size - 1].eventfd = eventfd;
4164         new->entries[size - 1].threshold = threshold;
4165
4166         /* Sort thresholds. Registering a new threshold isn't time-critical */
4167         sort(new->entries, size, sizeof(*new->entries),
4168                         compare_thresholds, NULL);
4169
4170         /* Find current threshold */
4171         new->current_threshold = -1;
4172         for (i = 0; i < size; i++) {
4173                 if (new->entries[i].threshold <= usage) {
4174                         /*
4175                          * new->current_threshold will not be used until
4176                          * rcu_assign_pointer(), so it's safe to increment
4177                          * it here.
4178                          */
4179                         ++new->current_threshold;
4180                 } else
4181                         break;
4182         }
4183
4184         /* Free old spare buffer and save old primary buffer as spare */
4185         kfree(thresholds->spare);
4186         thresholds->spare = thresholds->primary;
4187
4188         rcu_assign_pointer(thresholds->primary, new);
4189
4190         /* To be sure that nobody uses thresholds */
4191         synchronize_rcu();
4192
4193 unlock:
4194         mutex_unlock(&memcg->thresholds_lock);
4195
4196         return ret;
4197 }
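
/*
 * Illustrative usage (cgroup v1): a threshold is armed from userspace by
 * writing "<event_fd> <memory.usage_in_bytes fd> <threshold>" to
 * cgroup.event_control; the args string parsed above is the threshold,
 * e.g. "100M".
 */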
4198
4199 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4200         struct eventfd_ctx *eventfd, const char *args)
4201 {
4202         return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4203 }
4204
4205 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4206         struct eventfd_ctx *eventfd, const char *args)
4207 {
4208         return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
4209 }
4210
4211 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4212         struct eventfd_ctx *eventfd, enum res_type type)
4213 {
4214         struct mem_cgroup_thresholds *thresholds;
4215         struct mem_cgroup_threshold_ary *new;
4216         unsigned long usage;
4217         int i, j, size, entries;
4218
4219         mutex_lock(&memcg->thresholds_lock);
4220
4221         if (type == _MEM) {
4222                 thresholds = &memcg->thresholds;
4223                 usage = mem_cgroup_usage(memcg, false);
4224         } else if (type == _MEMSWAP) {
4225                 thresholds = &memcg->memsw_thresholds;
4226                 usage = mem_cgroup_usage(memcg, true);
4227         } else
4228                 BUG();
4229
4230         if (!thresholds->primary)
4231                 goto unlock;
4232
4233         /* Check if a threshold was crossed before removing */
4234         __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4235
4236         /* Calculate the new number of thresholds */
4237         size = entries = 0;
4238         for (i = 0; i < thresholds->primary->size; i++) {
4239                 if (thresholds->primary->entries[i].eventfd != eventfd)
4240                         size++;
4241                 else
4242                         entries++;
4243         }
4244
4245         new = thresholds->spare;
4246
4247         /* If no items related to eventfd have been cleared, nothing to do */
4248         if (!entries)
4249                 goto unlock;
4250
4251         /* Set thresholds array to NULL if we don't have thresholds */
4252         if (!size) {
4253                 kfree(new);
4254                 new = NULL;
4255                 goto swap_buffers;
4256         }
4257
4258         new->size = size;
4259
4260         /* Copy thresholds and find current threshold */
4261         new->current_threshold = -1;
4262         for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4263                 if (thresholds->primary->entries[i].eventfd == eventfd)
4264                         continue;
4265
4266                 new->entries[j] = thresholds->primary->entries[i];
4267                 if (new->entries[j].threshold <= usage) {
4268                         /*
4269                          * new->current_threshold will not be used
4270                          * until rcu_assign_pointer(), so it's safe to increment
4271                          * it here.
4272                          */
4273                         ++new->current_threshold;
4274                 }
4275                 j++;
4276         }
4277
4278 swap_buffers:
4279         /* Swap primary and spare array */
4280         thresholds->spare = thresholds->primary;
4281
4282         rcu_assign_pointer(thresholds->primary, new);
4283
4284         /* To be sure that nobody uses thresholds */
4285         synchronize_rcu();
4286
4287         /* If all events are unregistered, free the spare array */
4288         if (!new) {
4289                 kfree(thresholds->spare);
4290                 thresholds->spare = NULL;
4291         }
4292 unlock:
4293         mutex_unlock(&memcg->thresholds_lock);
4294 }
4295
4296 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4297         struct eventfd_ctx *eventfd)
4298 {
4299         return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4300 }
4301
4302 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4303         struct eventfd_ctx *eventfd)
4304 {
4305         return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4306 }
4307
4308 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4309         struct eventfd_ctx *eventfd, const char *args)
4310 {
4311         struct mem_cgroup_eventfd_list *event;
4312
4313         event = kmalloc(sizeof(*event), GFP_KERNEL);
4314         if (!event)
4315                 return -ENOMEM;
4316
4317         spin_lock(&memcg_oom_lock);
4318
4319         event->eventfd = eventfd;
4320         list_add(&event->list, &memcg->oom_notify);
4321
4322         /* already in OOM? */
4323         if (memcg->under_oom)
4324                 eventfd_signal(eventfd, 1);
4325         spin_unlock(&memcg_oom_lock);
4326
4327         return 0;
4328 }
4329
4330 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4331         struct eventfd_ctx *eventfd)
4332 {
4333         struct mem_cgroup_eventfd_list *ev, *tmp;
4334
4335         spin_lock(&memcg_oom_lock);
4336
4337         list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4338                 if (ev->eventfd == eventfd) {
4339                         list_del(&ev->list);
4340                         kfree(ev);
4341                 }
4342         }
4343
4344         spin_unlock(&memcg_oom_lock);
4345 }
4346
4347 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4348 {
4349         struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4350
4351         seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4352         seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4353         seq_printf(sf, "oom_kill %lu\n",
4354                    atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4355         return 0;
4356 }
4357
4358 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4359         struct cftype *cft, u64 val)
4360 {
4361         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4362
4363         /* cannot be set on the root cgroup; only 0 and 1 are allowed */
4364         if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4365                 return -EINVAL;
4366
4367         memcg->oom_kill_disable = val;
4368         if (!val)
4369                 memcg_oom_recover(memcg);
4370
4371         return 0;
4372 }
4373
4374 #ifdef CONFIG_CGROUP_WRITEBACK
4375
4376 #include <trace/events/writeback.h>
4377
4378 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4379 {
4380         return wb_domain_init(&memcg->cgwb_domain, gfp);
4381 }
4382
4383 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4384 {
4385         wb_domain_exit(&memcg->cgwb_domain);
4386 }
4387
4388 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4389 {
4390         wb_domain_size_changed(&memcg->cgwb_domain);
4391 }
4392
4393 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4394 {
4395         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4396
4397         if (!memcg->css.parent)
4398                 return NULL;
4399
4400         return &memcg->cgwb_domain;
4401 }
4402
4403 /**
4404  * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4405  * @wb: bdi_writeback in question
4406  * @pfilepages: out parameter for number of file pages
4407  * @pheadroom: out parameter for number of allocatable pages according to memcg
4408  * @pdirty: out parameter for number of dirty pages
4409  * @pwriteback: out parameter for number of pages under writeback
4410  *
4411  * Determine the numbers of file, headroom, dirty, and writeback pages in
4412  * @wb's memcg.  File, dirty and writeback are self-explanatory.  Headroom
4413  * is a bit more involved.
4414  *
4415  * A memcg's headroom is "min(max, high) - used".  In the hierarchy, the
4416  * headroom is calculated as the lowest headroom of itself and the
4417  * ancestors.  Note that this doesn't consider the actual amount of
4418  * available memory in the system.  The caller should further cap
4419  * *@pheadroom accordingly.
4420  */
4421 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4422                          unsigned long *pheadroom, unsigned long *pdirty,
4423                          unsigned long *pwriteback)
4424 {
4425         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4426         struct mem_cgroup *parent;
4427
4428         mem_cgroup_flush_stats();
4429
4430         *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4431         *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4432         *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4433                         memcg_page_state(memcg, NR_ACTIVE_FILE);
4434
4435         *pheadroom = PAGE_COUNTER_MAX;
4436         while ((parent = parent_mem_cgroup(memcg))) {
4437                 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4438                                             READ_ONCE(memcg->memory.high));
4439                 unsigned long used = page_counter_read(&memcg->memory);
4440
4441                 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
4442                 memcg = parent;
4443         }
4444 }
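
/*
 * Worked example (illustrative): for a writeback domain in cgroup B under
 * parent A, where A has max == 1G and used == 900M (headroom 100M) and B
 * has high == 512M and used == 200M (headroom 312M), *pheadroom becomes
 * the subtree minimum, 100M.
 */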
4445
4446 /*
4447  * Foreign dirty flushing
4448  *
4449  * There's an inherent mismatch between memcg and writeback.  The former
4450  * tracks ownership per-page while the latter per-inode.  This was a
4451  * deliberate design decision because honoring per-page ownership in the
4452  * writeback path is complicated, may lead to higher CPU and IO overheads
4453  * and deemed unnecessary given that write-sharing an inode across
4454  * different cgroups isn't a common use-case.
4455  *
4456  * Combined with inode majority-writer ownership switching, this works well
4457  * enough in most cases but there are some pathological cases.  For
4458  * example, let's say there are two cgroups A and B which keep writing to
4459  * different but confined parts of the same inode.  B owns the inode and
4460  * A's memory is limited far below B's.  A's dirty ratio can rise enough to
4461  * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4462  * triggering background writeback.  A will be slowed down without a way to
4463  * make writeback of the dirty pages happen.
4464  *
4465  * Conditions like the above can lead to a cgroup getting repeatedly and
4466  * severely throttled after making some progress after each
4467  * dirty_expire_interval while the underlying IO device is almost
4468  * completely idle.
4469  *
4470  * Solving this problem completely requires matching the ownership tracking
4471  * granularities between memcg and writeback in either direction.  However,
4472  * the more egregious behaviors can be avoided by simply remembering the
4473  * most recent foreign dirtying events and initiating remote flushes on
4474  * them when local writeback isn't enough to keep the memory clean enough.
4475  *
4476  * The following two functions implement such mechanism.  When a foreign
4477  * page - a page whose memcg and writeback ownerships don't match - is
4478  * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4479  * bdi_writeback on the page owning memcg.  When balance_dirty_pages()
4480  * decides that the memcg needs to sleep due to high dirty ratio, it calls
4481  * mem_cgroup_flush_foreign() which queues writeback on the recorded
4482  * foreign bdi_writebacks which haven't expired.  Both the numbers of
4483  * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4484  * limited to MEMCG_CGWB_FRN_CNT.
4485  *
4486  * The mechanism only remembers IDs and doesn't hold any object references.
4487  * As being wrong occasionally doesn't matter, updates and accesses to the
4488  * records are lockless and racy.
4489  */
4490 void mem_cgroup_track_foreign_dirty_slowpath(struct folio *folio,
4491                                              struct bdi_writeback *wb)
4492 {
4493         struct mem_cgroup *memcg = folio_memcg(folio);
4494         struct memcg_cgwb_frn *frn;
4495         u64 now = get_jiffies_64();
4496         u64 oldest_at = now;
4497         int oldest = -1;
4498         int i;
4499
4500         trace_track_foreign_dirty(folio, wb);
4501
4502         /*
4503          * Pick the slot to use.  If there is already a slot for @wb, keep
4504          * using it.  If not, replace the oldest one which isn't being
4505          * written out.
4506          */
4507         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4508                 frn = &memcg->cgwb_frn[i];
4509                 if (frn->bdi_id == wb->bdi->id &&
4510                     frn->memcg_id == wb->memcg_css->id)
4511                         break;
4512                 if (time_before64(frn->at, oldest_at) &&
4513                     atomic_read(&frn->done.cnt) == 1) {
4514                         oldest = i;
4515                         oldest_at = frn->at;
4516                 }
4517         }
4518
4519         if (i < MEMCG_CGWB_FRN_CNT) {
4520                 /*
4521                  * Re-using an existing one.  Update timestamp lazily to
4522                  * avoid making the cacheline hot.  We want them to be
4523                  * reasonably up-to-date and significantly shorter than
4524                  * dirty_expire_interval as that's what expires the record.
4525                  * Use the shorter of 1s and dirty_expire_interval / 8.
4526                  */
4527                 unsigned long update_intv =
4528                         min_t(unsigned long, HZ,
4529                               msecs_to_jiffies(dirty_expire_interval * 10) / 8);
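                /*
                 * Worked example, assuming the default dirty_expire_interval
                 * of 3000 centisecs: 3000 * 10 msecs / 8 = 3.75s, so the
                 * min_t() above caps the update interval at HZ, i.e. 1s.
                 */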
4530
4531                 if (time_before64(frn->at, now - update_intv))
4532                         frn->at = now;
4533         } else if (oldest >= 0) {
4534                 /* replace the oldest free one */
4535                 frn = &memcg->cgwb_frn[oldest];
4536                 frn->bdi_id = wb->bdi->id;
4537                 frn->memcg_id = wb->memcg_css->id;
4538                 frn->at = now;
4539         }
4540 }
4541
4542 /* issue foreign writeback flushes for recorded foreign dirtying events */
4543 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4544 {
4545         struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4546         unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4547         u64 now = jiffies_64;
4548         int i;
4549
4550         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4551                 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4552
4553                 /*
4554                  * If the record is older than dirty_expire_interval,
4555                  * writeback on it has already started.  No need to kick it
4556                  * off again.  Also, don't start a new one if there's
4557                  * already one in flight.
4558                  */
4559                 if (time_after64(frn->at, now - intv) &&
4560                     atomic_read(&frn->done.cnt) == 1) {
4561                         frn->at = 0;
4562                         trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4563                         cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id,
4564                                                WB_REASON_FOREIGN_FLUSH,
4565                                                &frn->done);
4566                 }
4567         }
4568 }
4569
4570 #else   /* CONFIG_CGROUP_WRITEBACK */
4571
4572 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4573 {
4574         return 0;
4575 }
4576
4577 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4578 {
4579 }
4580
4581 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4582 {
4583 }
4584
4585 #endif  /* CONFIG_CGROUP_WRITEBACK */
4586
4587 /*
4588  * DO NOT USE IN NEW FILES.
4589  *
4590  * "cgroup.event_control" implementation.
4591  *
4592  * This is way over-engineered.  It tries to support fully configurable
4593  * events for each user.  Such a level of flexibility is completely
4594  * unnecessary, especially in light of the planned unified hierarchy.
4595  *
4596  * Please deprecate this and replace with something simpler if at all
4597  * possible.
4598  */
4599
4600 /*
4601  * Unregister event and free resources.
4602  *
4603  * Gets called from workqueue.
4604  */
4605 static void memcg_event_remove(struct work_struct *work)
4606 {
4607         struct mem_cgroup_event *event =
4608                 container_of(work, struct mem_cgroup_event, remove);
4609         struct mem_cgroup *memcg = event->memcg;
4610
4611         remove_wait_queue(event->wqh, &event->wait);
4612
4613         event->unregister_event(memcg, event->eventfd);
4614
4615         /* Notify userspace the event is going away. */
4616         eventfd_signal(event->eventfd, 1);
4617
4618         eventfd_ctx_put(event->eventfd);
4619         kfree(event);
4620         css_put(&memcg->css);
4621 }
4622
4623 /*
4624  * Gets called on EPOLLHUP on eventfd when user closes it.
4625  *
4626  * Called with wqh->lock held and interrupts disabled.
4627  */
4628 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4629                             int sync, void *key)
4630 {
4631         struct mem_cgroup_event *event =
4632                 container_of(wait, struct mem_cgroup_event, wait);
4633         struct mem_cgroup *memcg = event->memcg;
4634         __poll_t flags = key_to_poll(key);
4635
4636         if (flags & EPOLLHUP) {
4637                 /*
4638                  * If the event has been detached at cgroup removal, we
4639                  * can simply return knowing the other side will cleanup
4640                  * for us.
4641                  *
4642                  * We can't race against event freeing since the other
4643                  * side will require wqh->lock via remove_wait_queue(),
4644                  * which we hold.
4645                  */
4646                 spin_lock(&memcg->event_list_lock);
4647                 if (!list_empty(&event->list)) {
4648                         list_del_init(&event->list);
4649                         /*
4650                          * We are in atomic context, but memcg_event_remove()
4651                          * may sleep, so we have to call it in workqueue.
4652                          */
4653                         schedule_work(&event->remove);
4654                 }
4655                 spin_unlock(&memcg->event_list_lock);
4656         }
4657
4658         return 0;
4659 }
4660
4661 static void memcg_event_ptable_queue_proc(struct file *file,
4662                 wait_queue_head_t *wqh, poll_table *pt)
4663 {
4664         struct mem_cgroup_event *event =
4665                 container_of(pt, struct mem_cgroup_event, pt);
4666
4667         event->wqh = wqh;
4668         add_wait_queue(wqh, &event->wait);
4669 }
4670
4671 /*
4672  * DO NOT USE IN NEW FILES.
4673  *
4674  * Parse input and register new cgroup event handler.
4675  *
4676  * Input must be in format '<event_fd> <control_fd> <args>'.
4677  * Interpretation of args is defined by control file implementation.
4678  */
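/*
 * For example (with hypothetical fd numbers): a process holding an
 * eventfd at fd 3 and an open memory.usage_in_bytes at fd 4 can ask
 * to be notified when usage crosses 1M with:
 *
 *	echo "3 4 1048576" > cgroup.event_control
 */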
4679 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4680                                          char *buf, size_t nbytes, loff_t off)
4681 {
4682         struct cgroup_subsys_state *css = of_css(of);
4683         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4684         struct mem_cgroup_event *event;
4685         struct cgroup_subsys_state *cfile_css;
4686         unsigned int efd, cfd;
4687         struct fd efile;
4688         struct fd cfile;
4689         const char *name;
4690         char *endp;
4691         int ret;
4692
4693         if (IS_ENABLED(CONFIG_PREEMPT_RT))
4694                 return -EOPNOTSUPP;
4695
4696         buf = strstrip(buf);
4697
4698         efd = simple_strtoul(buf, &endp, 10);
4699         if (*endp != ' ')
4700                 return -EINVAL;
4701         buf = endp + 1;
4702
4703         cfd = simple_strtoul(buf, &endp, 10);
4704         if ((*endp != ' ') && (*endp != '\0'))
4705                 return -EINVAL;
4706         buf = endp + 1;
4707
4708         event = kzalloc(sizeof(*event), GFP_KERNEL);
4709         if (!event)
4710                 return -ENOMEM;
4711
4712         event->memcg = memcg;
4713         INIT_LIST_HEAD(&event->list);
4714         init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4715         init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4716         INIT_WORK(&event->remove, memcg_event_remove);
4717
4718         efile = fdget(efd);
4719         if (!efile.file) {
4720                 ret = -EBADF;
4721                 goto out_kfree;
4722         }
4723
4724         event->eventfd = eventfd_ctx_fileget(efile.file);
4725         if (IS_ERR(event->eventfd)) {
4726                 ret = PTR_ERR(event->eventfd);
4727                 goto out_put_efile;
4728         }
4729
4730         cfile = fdget(cfd);
4731         if (!cfile.file) {
4732                 ret = -EBADF;
4733                 goto out_put_eventfd;
4734         }
4735
4736         /* the process needs read permission on the control file */
4737         /* AV: shouldn't we check that it's been opened for read instead? */
4738         ret = file_permission(cfile.file, MAY_READ);
4739         if (ret < 0)
4740                 goto out_put_cfile;
4741
4742         /*
4743          * Determine the event callbacks and set them in @event.  This used
4744          * to be done via struct cftype but cgroup core no longer knows
4745          * about these events.  The following is crude but the whole thing
4746          * is for compatibility anyway.
4747          *
4748          * DO NOT ADD NEW FILES.
4749          */
4750         name = cfile.file->f_path.dentry->d_name.name;
4751
4752         if (!strcmp(name, "memory.usage_in_bytes")) {
4753                 event->register_event = mem_cgroup_usage_register_event;
4754                 event->unregister_event = mem_cgroup_usage_unregister_event;
4755         } else if (!strcmp(name, "memory.oom_control")) {
4756                 event->register_event = mem_cgroup_oom_register_event;
4757                 event->unregister_event = mem_cgroup_oom_unregister_event;
4758         } else if (!strcmp(name, "memory.pressure_level")) {
4759                 event->register_event = vmpressure_register_event;
4760                 event->unregister_event = vmpressure_unregister_event;
4761         } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4762                 event->register_event = memsw_cgroup_usage_register_event;
4763                 event->unregister_event = memsw_cgroup_usage_unregister_event;
4764         } else {
4765                 ret = -EINVAL;
4766                 goto out_put_cfile;
4767         }
4768
4769         /*
4770          * Verify that @cfile belongs to @css.  Also, remaining events are
4771          * automatically removed on cgroup destruction but the removal is
4772          * asynchronous, so take an extra ref on @css.
4773          */
4774         cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4775                                                &memory_cgrp_subsys);
4776         ret = -EINVAL;
4777         if (IS_ERR(cfile_css))
4778                 goto out_put_cfile;
4779         if (cfile_css != css) {
4780                 css_put(cfile_css);
4781                 goto out_put_cfile;
4782         }
4783
4784         ret = event->register_event(memcg, event->eventfd, buf);
4785         if (ret)
4786                 goto out_put_css;
4787
4788         vfs_poll(efile.file, &event->pt);
4789
4790         spin_lock_irq(&memcg->event_list_lock);
4791         list_add(&event->list, &memcg->event_list);
4792         spin_unlock_irq(&memcg->event_list_lock);
4793
4794         fdput(cfile);
4795         fdput(efile);
4796
4797         return nbytes;
4798
4799 out_put_css:
4800         css_put(css);
4801 out_put_cfile:
4802         fdput(cfile);
4803 out_put_eventfd:
4804         eventfd_ctx_put(event->eventfd);
4805 out_put_efile:
4806         fdput(efile);
4807 out_kfree:
4808         kfree(event);
4809
4810         return ret;
4811 }
4812
4813 #if defined(CONFIG_MEMCG_KMEM) && (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
4814 static int mem_cgroup_slab_show(struct seq_file *m, void *p)
4815 {
4816         /*
4817          * Deprecated.
4818          * Please take a look at tools/cgroup/slabinfo.py instead.
4819          */
4820         return 0;
4821 }
4822 #endif
4823
4824 static struct cftype mem_cgroup_legacy_files[] = {
4825         {
4826                 .name = "usage_in_bytes",
4827                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4828                 .read_u64 = mem_cgroup_read_u64,
4829         },
4830         {
4831                 .name = "max_usage_in_bytes",
4832                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4833                 .write = mem_cgroup_reset,
4834                 .read_u64 = mem_cgroup_read_u64,
4835         },
4836         {
4837                 .name = "limit_in_bytes",
4838                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4839                 .write = mem_cgroup_write,
4840                 .read_u64 = mem_cgroup_read_u64,
4841         },
4842         {
4843                 .name = "soft_limit_in_bytes",
4844                 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4845                 .write = mem_cgroup_write,
4846                 .read_u64 = mem_cgroup_read_u64,
4847         },
4848         {
4849                 .name = "failcnt",
4850                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4851                 .write = mem_cgroup_reset,
4852                 .read_u64 = mem_cgroup_read_u64,
4853         },
4854         {
4855                 .name = "stat",
4856                 .seq_show = memcg_stat_show,
4857         },
4858         {
4859                 .name = "force_empty",
4860                 .write = mem_cgroup_force_empty_write,
4861         },
4862         {
4863                 .name = "use_hierarchy",
4864                 .write_u64 = mem_cgroup_hierarchy_write,
4865                 .read_u64 = mem_cgroup_hierarchy_read,
4866         },
4867         {
4868                 .name = "cgroup.event_control",         /* XXX: for compat */
4869                 .write = memcg_write_event_control,
4870                 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4871         },
4872         {
4873                 .name = "swappiness",
4874                 .read_u64 = mem_cgroup_swappiness_read,
4875                 .write_u64 = mem_cgroup_swappiness_write,
4876         },
4877         {
4878                 .name = "move_charge_at_immigrate",
4879                 .read_u64 = mem_cgroup_move_charge_read,
4880                 .write_u64 = mem_cgroup_move_charge_write,
4881         },
4882         {
4883                 .name = "oom_control",
4884                 .seq_show = mem_cgroup_oom_control_read,
4885                 .write_u64 = mem_cgroup_oom_control_write,
4886                 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4887         },
4888         {
4889                 .name = "pressure_level",
4890         },
4891 #ifdef CONFIG_NUMA
4892         {
4893                 .name = "numa_stat",
4894                 .seq_show = memcg_numa_stat_show,
4895         },
4896 #endif
4897         {
4898                 .name = "kmem.limit_in_bytes",
4899                 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4900                 .write = mem_cgroup_write,
4901                 .read_u64 = mem_cgroup_read_u64,
4902         },
4903         {
4904                 .name = "kmem.usage_in_bytes",
4905                 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4906                 .read_u64 = mem_cgroup_read_u64,
4907         },
4908         {
4909                 .name = "kmem.failcnt",
4910                 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4911                 .write = mem_cgroup_reset,
4912                 .read_u64 = mem_cgroup_read_u64,
4913         },
4914         {
4915                 .name = "kmem.max_usage_in_bytes",
4916                 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4917                 .write = mem_cgroup_reset,
4918                 .read_u64 = mem_cgroup_read_u64,
4919         },
4920 #if defined(CONFIG_MEMCG_KMEM) && \
4921         (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
4922         {
4923                 .name = "kmem.slabinfo",
4924                 .seq_show = mem_cgroup_slab_show,
4925         },
4926 #endif
4927         {
4928                 .name = "kmem.tcp.limit_in_bytes",
4929                 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
4930                 .write = mem_cgroup_write,
4931                 .read_u64 = mem_cgroup_read_u64,
4932         },
4933         {
4934                 .name = "kmem.tcp.usage_in_bytes",
4935                 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
4936                 .read_u64 = mem_cgroup_read_u64,
4937         },
4938         {
4939                 .name = "kmem.tcp.failcnt",
4940                 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
4941                 .write = mem_cgroup_reset,
4942                 .read_u64 = mem_cgroup_read_u64,
4943         },
4944         {
4945                 .name = "kmem.tcp.max_usage_in_bytes",
4946                 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
4947                 .write = mem_cgroup_reset,
4948                 .read_u64 = mem_cgroup_read_u64,
4949         },
4950         { },    /* terminate */
4951 };
4952
4953 /*
4954  * Private memory cgroup IDR
4955  *
4956  * Swap-out records and page cache shadow entries need to store memcg
4957  * references in constrained space, so we maintain an ID space that is
4958  * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
4959  * memory-controlled cgroups to 64k.
4960  *
4961  * However, there usually are many references to the offline CSS after
4962  * the cgroup has been destroyed, such as page cache or reclaimable
4963  * slab objects, which don't need to hang on to the ID. We want to keep
4964  * those dead CSSes from occupying IDs, or we might quickly exhaust the
4965  * relatively small ID space and prevent the creation of new cgroups
4966  * even when there are far fewer than 64k cgroups - possibly none.
4967  *
4968  * Maintain a private 16-bit ID space for memcg, and allow the ID to
4969  * be freed and recycled when it's no longer needed, which is usually
4970  * when the CSS is offlined.
4971  *
4972  * The only exception to that are records of swapped out tmpfs/shmem
4973  * pages that need to be attributed to live ancestors on swapin. But
4974  * those references are manageable from userspace.
4975  */
4976
4977 static DEFINE_IDR(mem_cgroup_idr);
4978
4979 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
4980 {
4981         if (memcg->id.id > 0) {
4982                 idr_remove(&mem_cgroup_idr, memcg->id.id);
4983                 memcg->id.id = 0;
4984         }
4985 }
4986
4987 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
4988                                                   unsigned int n)
4989 {
4990         refcount_add(n, &memcg->id.ref);
4991 }
4992
4993 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
4994 {
4995         if (refcount_sub_and_test(n, &memcg->id.ref)) {
4996                 mem_cgroup_id_remove(memcg);
4997
4998                 /* Memcg ID pins CSS */
4999                 css_put(&memcg->css);
5000         }
5001 }
5002
5003 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5004 {
5005         mem_cgroup_id_put_many(memcg, 1);
5006 }
5007
5008 /**
5009  * mem_cgroup_from_id - look up a memcg from a memcg id
5010  * @id: the memcg id to look up
5011  *
5012  * Caller must hold rcu_read_lock().
5013  */
5014 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5015 {
5016         WARN_ON_ONCE(!rcu_read_lock_held());
5017         return idr_find(&mem_cgroup_idr, id);
5018 }
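/*
 * A typical lookup pattern (sketch): the result is only stabilized by
 * RCU, so a caller that wants to use the memcg beyond the critical
 * section must take its own reference, e.g.:
 *
 *	rcu_read_lock();
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */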
5019
5020 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5021 {
5022         struct mem_cgroup_per_node *pn;
5023
5024         pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, node);
5025         if (!pn)
5026                 return 1;
5027
5028         pn->lruvec_stats_percpu = alloc_percpu_gfp(struct lruvec_stats_percpu,
5029                                                    GFP_KERNEL_ACCOUNT);
5030         if (!pn->lruvec_stats_percpu) {
5031                 kfree(pn);
5032                 return 1;
5033         }
5034
5035         lruvec_init(&pn->lruvec);
5036         pn->memcg = memcg;
5037
5038         memcg->nodeinfo[node] = pn;
5039         return 0;
5040 }
5041
5042 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5043 {
5044         struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5045
5046         if (!pn)
5047                 return;
5048
5049         free_percpu(pn->lruvec_stats_percpu);
5050         kfree(pn);
5051 }
5052
5053 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5054 {
5055         int node;
5056
5057         for_each_node(node)
5058                 free_mem_cgroup_per_node_info(memcg, node);
5059         free_percpu(memcg->vmstats_percpu);
5060         kfree(memcg);
5061 }
5062
5063 static void mem_cgroup_free(struct mem_cgroup *memcg)
5064 {
5065         memcg_wb_domain_exit(memcg);
5066         __mem_cgroup_free(memcg);
5067 }
5068
5069 static struct mem_cgroup *mem_cgroup_alloc(void)
5070 {
5071         struct mem_cgroup *memcg;
5072         int node;
5073         int __maybe_unused i;
5074         long error = -ENOMEM;
5075
5076         memcg = kzalloc(struct_size(memcg, nodeinfo, nr_node_ids), GFP_KERNEL);
5077         if (!memcg)
5078                 return ERR_PTR(error);
5079
5080         memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5081                                  1, MEM_CGROUP_ID_MAX + 1, GFP_KERNEL);
5082         if (memcg->id.id < 0) {
5083                 error = memcg->id.id;
5084                 goto fail;
5085         }
5086
5087         memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5088                                                  GFP_KERNEL_ACCOUNT);
5089         if (!memcg->vmstats_percpu)
5090                 goto fail;
5091
5092         for_each_node(node)
5093                 if (alloc_mem_cgroup_per_node_info(memcg, node))
5094                         goto fail;
5095
5096         if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5097                 goto fail;
5098
5099         INIT_WORK(&memcg->high_work, high_work_func);
5100         INIT_LIST_HEAD(&memcg->oom_notify);
5101         mutex_init(&memcg->thresholds_lock);
5102         spin_lock_init(&memcg->move_lock);
5103         vmpressure_init(&memcg->vmpressure);
5104         INIT_LIST_HEAD(&memcg->event_list);
5105         spin_lock_init(&memcg->event_list_lock);
5106         memcg->socket_pressure = jiffies;
5107 #ifdef CONFIG_MEMCG_KMEM
5108         memcg->kmemcg_id = -1;
5109         INIT_LIST_HEAD(&memcg->objcg_list);
5110 #endif
5111 #ifdef CONFIG_CGROUP_WRITEBACK
5112         INIT_LIST_HEAD(&memcg->cgwb_list);
5113         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5114                 memcg->cgwb_frn[i].done =
5115                         __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5116 #endif
5117 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5118         spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5119         INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5120         memcg->deferred_split_queue.split_queue_len = 0;
5121 #endif
5122         idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5123         return memcg;
5124 fail:
5125         mem_cgroup_id_remove(memcg);
5126         __mem_cgroup_free(memcg);
5127         return ERR_PTR(error);
5128 }
5129
5130 static struct cgroup_subsys_state * __ref
5131 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5132 {
5133         struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5134         struct mem_cgroup *memcg, *old_memcg;
5135
5136         old_memcg = set_active_memcg(parent);
5137         memcg = mem_cgroup_alloc();
5138         set_active_memcg(old_memcg);
5139         if (IS_ERR(memcg))
5140                 return ERR_CAST(memcg);
5141
5142         page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5143         memcg->soft_limit = PAGE_COUNTER_MAX;
5144         page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5145         if (parent) {
5146                 memcg->swappiness = mem_cgroup_swappiness(parent);
5147                 memcg->oom_kill_disable = parent->oom_kill_disable;
5148
5149                 page_counter_init(&memcg->memory, &parent->memory);
5150                 page_counter_init(&memcg->swap, &parent->swap);
5151                 page_counter_init(&memcg->kmem, &parent->kmem);
5152                 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5153         } else {
5154                 page_counter_init(&memcg->memory, NULL);
5155                 page_counter_init(&memcg->swap, NULL);
5156                 page_counter_init(&memcg->kmem, NULL);
5157                 page_counter_init(&memcg->tcpmem, NULL);
5158
5159                 root_mem_cgroup = memcg;
5160                 return &memcg->css;
5161         }
5162
5163         if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5164                 static_branch_inc(&memcg_sockets_enabled_key);
5165
5166         return &memcg->css;
5167 }
5168
5169 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5170 {
5171         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5172
5173         if (memcg_online_kmem(memcg))
5174                 goto remove_id;
5175
5176         /*
5177          * A memcg must be visible to expand_shrinker_info()
5178          * by the time the maps are allocated, so allocate the maps
5179          * here, where for_each_mem_cgroup() can't skip it.
5180          */
5181         if (alloc_shrinker_info(memcg))
5182                 goto offline_kmem;
5183
5184         /* Online state pins memcg ID, memcg ID pins CSS */
5185         refcount_set(&memcg->id.ref, 1);
5186         css_get(css);
5187
5188         if (unlikely(mem_cgroup_is_root(memcg)))
5189                 queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
5190                                    2UL*HZ);
5191         return 0;
5192 offline_kmem:
5193         memcg_offline_kmem(memcg);
5194 remove_id:
5195         mem_cgroup_id_remove(memcg);
5196         return -ENOMEM;
5197 }
5198
5199 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5200 {
5201         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5202         struct mem_cgroup_event *event, *tmp;
5203
5204         /*
5205          * Unregister events and notify userspace.
5206          * Notify userspace about cgroup removing only after rmdir of cgroup
5207          * directory to avoid race between userspace and kernelspace.
5208          */
5209         spin_lock_irq(&memcg->event_list_lock);
5210         list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5211                 list_del_init(&event->list);
5212                 schedule_work(&event->remove);
5213         }
5214         spin_unlock_irq(&memcg->event_list_lock);
5215
5216         page_counter_set_min(&memcg->memory, 0);
5217         page_counter_set_low(&memcg->memory, 0);
5218
5219         memcg_offline_kmem(memcg);
5220         reparent_shrinker_deferred(memcg);
5221         wb_memcg_offline(memcg);
5222
5223         drain_all_stock(memcg);
5224
5225         mem_cgroup_id_put(memcg);
5226 }
5227
5228 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5229 {
5230         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5231
5232         invalidate_reclaim_iterators(memcg);
5233 }
5234
5235 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5236 {
5237         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5238         int __maybe_unused i;
5239
5240 #ifdef CONFIG_CGROUP_WRITEBACK
5241         for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5242                 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5243 #endif
5244         if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5245                 static_branch_dec(&memcg_sockets_enabled_key);
5246
5247         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5248                 static_branch_dec(&memcg_sockets_enabled_key);
5249
5250         vmpressure_cleanup(&memcg->vmpressure);
5251         cancel_work_sync(&memcg->high_work);
5252         mem_cgroup_remove_from_trees(memcg);
5253         free_shrinker_info(memcg);
5254         mem_cgroup_free(memcg);
5255 }
5256
5257 /**
5258  * mem_cgroup_css_reset - reset the states of a mem_cgroup
5259  * @css: the target css
5260  *
5261  * Reset the states of the mem_cgroup associated with @css.  This is
5262  * invoked when the userland requests disabling on the default hierarchy
5263  * but the memcg is pinned through dependency.  The memcg should stop
5264  * applying policies and should revert to the vanilla state as it may be
5265  * made visible again.
5266  *
5267  * The current implementation only resets the essential configurations.
5268  * This needs to be expanded to cover all the visible parts.
5269  */
5270 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5271 {
5272         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5273
5274         page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5275         page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5276         page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5277         page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5278         page_counter_set_min(&memcg->memory, 0);
5279         page_counter_set_low(&memcg->memory, 0);
5280         page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5281         memcg->soft_limit = PAGE_COUNTER_MAX;
5282         page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5283         memcg_wb_domain_size_changed(memcg);
5284 }
5285
5286 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5287 {
5288         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5289         struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5290         struct memcg_vmstats_percpu *statc;
5291         long delta, v;
5292         int i, nid;
5293
5294         statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5295
5296         for (i = 0; i < MEMCG_NR_STAT; i++) {
5297                 /*
5298                  * Collect the aggregated propagation counts of groups
5299                  * below us. We're in a per-cpu loop here and this is
5300                  * a global counter, so the first cycle will get them.
5301                  */
5302                 delta = memcg->vmstats.state_pending[i];
5303                 if (delta)
5304                         memcg->vmstats.state_pending[i] = 0;
5305
5306                 /* Add CPU changes on this level since the last flush */
5307                 v = READ_ONCE(statc->state[i]);
5308                 if (v != statc->state_prev[i]) {
5309                         delta += v - statc->state_prev[i];
5310                         statc->state_prev[i] = v;
5311                 }
5312
5313                 if (!delta)
5314                         continue;
5315
5316                 /* Aggregate counts on this level and propagate upwards */
5317                 memcg->vmstats.state[i] += delta;
5318                 if (parent)
5319                         parent->vmstats.state_pending[i] += delta;
5320         }
5321
5322         for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
5323                 delta = memcg->vmstats.events_pending[i];
5324                 if (delta)
5325                         memcg->vmstats.events_pending[i] = 0;
5326
5327                 v = READ_ONCE(statc->events[i]);
5328                 if (v != statc->events_prev[i]) {
5329                         delta += v - statc->events_prev[i];
5330                         statc->events_prev[i] = v;
5331                 }
5332
5333                 if (!delta)
5334                         continue;
5335
5336                 memcg->vmstats.events[i] += delta;
5337                 if (parent)
5338                         parent->vmstats.events_pending[i] += delta;
5339         }
5340
5341         for_each_node_state(nid, N_MEMORY) {
5342                 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
5343                 struct mem_cgroup_per_node *ppn = NULL;
5344                 struct lruvec_stats_percpu *lstatc;
5345
5346                 if (parent)
5347                         ppn = parent->nodeinfo[nid];
5348
5349                 lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
5350
5351                 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
5352                         delta = pn->lruvec_stats.state_pending[i];
5353                         if (delta)
5354                                 pn->lruvec_stats.state_pending[i] = 0;
5355
5356                         v = READ_ONCE(lstatc->state[i]);
5357                         if (v != lstatc->state_prev[i]) {
5358                                 delta += v - lstatc->state_prev[i];
5359                                 lstatc->state_prev[i] = v;
5360                         }
5361
5362                         if (!delta)
5363                                 continue;
5364
5365                         pn->lruvec_stats.state[i] += delta;
5366                         if (ppn)
5367                                 ppn->lruvec_stats.state_pending[i] += delta;
5368                 }
5369         }
5370 }
5371
5372 #ifdef CONFIG_MMU
5373 /* Handlers for move charge at task migration. */
5374 static int mem_cgroup_do_precharge(unsigned long count)
5375 {
5376         int ret;
5377
5378         /* Try a single bulk charge without reclaim first, kswapd may wake */
5379         ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5380         if (!ret) {
5381                 mc.precharge += count;
5382                 return ret;
5383         }
5384
5385         /* Try charges one by one with reclaim, but do not retry */
5386         while (count--) {
5387                 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
5388                 if (ret)
5389                         return ret;
5390                 mc.precharge++;
5391                 cond_resched();
5392         }
5393         return 0;
5394 }
5395
5396 union mc_target {
5397         struct page     *page;
5398         swp_entry_t     ent;
5399 };
5400
5401 enum mc_target_type {
5402         MC_TARGET_NONE = 0,
5403         MC_TARGET_PAGE,
5404         MC_TARGET_SWAP,
5405         MC_TARGET_DEVICE,
5406 };
5407
5408 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5409                                                 unsigned long addr, pte_t ptent)
5410 {
5411         struct page *page = vm_normal_page(vma, addr, ptent);
5412
5413         if (!page || !page_mapped(page))
5414                 return NULL;
5415         if (PageAnon(page)) {
5416                 if (!(mc.flags & MOVE_ANON))
5417                         return NULL;
5418         } else {
5419                 if (!(mc.flags & MOVE_FILE))
5420                         return NULL;
5421         }
5422         if (!get_page_unless_zero(page))
5423                 return NULL;
5424
5425         return page;
5426 }
5427
5428 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5429 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5430                         pte_t ptent, swp_entry_t *entry)
5431 {
5432         struct page *page = NULL;
5433         swp_entry_t ent = pte_to_swp_entry(ptent);
5434
5435         if (!(mc.flags & MOVE_ANON))
5436                 return NULL;
5437
5438         /*
5439          * Handle device private pages that are not accessible by the CPU, but
5440          * stored as special swap entries in the page table.
5441          */
5442         if (is_device_private_entry(ent)) {
5443                 page = pfn_swap_entry_to_page(ent);
5444                 if (!get_page_unless_zero(page))
5445                         return NULL;
5446                 return page;
5447         }
5448
5449         if (non_swap_entry(ent))
5450                 return NULL;
5451
5452         /*
5453          * Because lookup_swap_cache() updates some statistics counters,
5454          * we call find_get_page() with swapper_space directly.
5455          */
5456         page = find_get_page(swap_address_space(ent), swp_offset(ent));
5457         entry->val = ent.val;
5458
5459         return page;
5460 }
5461 #else
5462 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5463                         pte_t ptent, swp_entry_t *entry)
5464 {
5465         return NULL;
5466 }
5467 #endif
5468
5469 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5470                         unsigned long addr, pte_t ptent)
5471 {
5472         if (!vma->vm_file) /* anonymous vma */
5473                 return NULL;
5474         if (!(mc.flags & MOVE_FILE))
5475                 return NULL;
5476
5477         /* page is moved even if it's not RSS of this task (page-faulted). */
5478         /* shmem/tmpfs may report page out on swap: account for that too. */
5479         return find_get_incore_page(vma->vm_file->f_mapping,
5480                         linear_page_index(vma, addr));
5481 }
5482
5483 /**
5484  * mem_cgroup_move_account - move account of the page
5485  * @page: the page
5486  * @compound: charge the page as compound or small page
5487  * @from: mem_cgroup which the page is moved from.
5488  * @to: mem_cgroup which the page is moved to. @from != @to.
5489  *
5490  * The caller must make sure the page is not on the LRU (isolate_lru_page() is useful).
5491  *
5492  * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
5493  * from old cgroup.
5494  */
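/*
 * Typical caller pattern (sketch, mirroring the move-charge walk below):
 *
 *	if (!isolate_lru_page(page)) {
 *		if (!mem_cgroup_move_account(page, false, from, to))
 *			...account the moved charge...
 *		putback_lru_page(page);
 *	}
 */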
5495 static int mem_cgroup_move_account(struct page *page,
5496                                    bool compound,
5497                                    struct mem_cgroup *from,
5498                                    struct mem_cgroup *to)
5499 {
5500         struct folio *folio = page_folio(page);
5501         struct lruvec *from_vec, *to_vec;
5502         struct pglist_data *pgdat;
5503         unsigned int nr_pages = compound ? folio_nr_pages(folio) : 1;
5504         int nid, ret;
5505
5506         VM_BUG_ON(from == to);
5507         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
5508         VM_BUG_ON(compound && !folio_test_large(folio));
5509
5510          * Prevent mem_cgroup_migrate() from looking at the memcg
5511          * of its source page while we change it.
5512          * page's memory cgroup of its source page while we change it.
5513          */
5514         ret = -EBUSY;
5515         if (!folio_trylock(folio))
5516                 goto out;
5517
5518         ret = -EINVAL;
5519         if (folio_memcg(folio) != from)
5520                 goto out_unlock;
5521
5522         pgdat = folio_pgdat(folio);
5523         from_vec = mem_cgroup_lruvec(from, pgdat);
5524         to_vec = mem_cgroup_lruvec(to, pgdat);
5525
5526         folio_memcg_lock(folio);
5527
5528         if (folio_test_anon(folio)) {
5529                 if (folio_mapped(folio)) {
5530                         __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5531                         __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5532                         if (folio_test_transhuge(folio)) {
5533                                 __mod_lruvec_state(from_vec, NR_ANON_THPS,
5534                                                    -nr_pages);
5535                                 __mod_lruvec_state(to_vec, NR_ANON_THPS,
5536                                                    nr_pages);
5537                         }
5538                 }
5539         } else {
5540                 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5541                 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5542
5543                 if (folio_test_swapbacked(folio)) {
5544                         __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5545                         __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5546                 }
5547
5548                 if (folio_mapped(folio)) {
5549                         __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5550                         __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5551                 }
5552
5553                 if (folio_test_dirty(folio)) {
5554                         struct address_space *mapping = folio_mapping(folio);
5555
5556                         if (mapping_can_writeback(mapping)) {
5557                                 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5558                                                    -nr_pages);
5559                                 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5560                                                    nr_pages);
5561                         }
5562                 }
5563         }
5564
5565         if (folio_test_writeback(folio)) {
5566                 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5567                 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5568         }
5569
5570         /*
5571          * All state has been migrated, let's switch to the new memcg.
5572          *
5573          * It is safe to change page's memcg here because the page
5574          * is referenced, charged, isolated, and locked: we can't race
5575          * with (un)charging, migration, LRU putback, or anything else
5576          * that would rely on a stable page's memory cgroup.
5577          *
5578          * Note that lock_page_memcg is a memcg lock, not a page lock,
5579          * to save space. As soon as we switch page's memory cgroup to a
5580          * new memcg that isn't locked, the above state can change
5581          * concurrently again. Make sure we're truly done with it.
5582          */
5583         smp_mb();
5584
5585         css_get(&to->css);
5586         css_put(&from->css);
5587
5588         folio->memcg_data = (unsigned long)to;
5589
5590         __folio_memcg_unlock(from);
5591
5592         ret = 0;
5593         nid = folio_nid(folio);
5594
5595         local_irq_disable();
5596         mem_cgroup_charge_statistics(to, nr_pages);
5597         memcg_check_events(to, nid);
5598         mem_cgroup_charge_statistics(from, -nr_pages);
5599         memcg_check_events(from, nid);
5600         local_irq_enable();
5601 out_unlock:
5602         folio_unlock(folio);
5603 out:
5604         return ret;
5605 }
5606
5607 /**
5608  * get_mctgt_type - get target type of moving charge
5609  * @vma: the vma to which the pte to be checked belongs
5610  * @addr: the address corresponding to the pte to be checked
5611  * @ptent: the pte to be checked
5612  * @target: the pointer in which the target page or swap entry is stored (can be NULL)
5613  *
5614  * Returns
5615  *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
5616  *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
5617  *     move charge. If @target is not NULL, the page is stored in target->page
5618  *     with an extra refcount taken (callers should handle it).
5619  *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
5620  *     target for charge migration. If @target is not NULL, the entry is stored
5621  *     in target->ent.
5622  *   3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PRIVATE
5623  *     (so a ZONE_DEVICE page and thus not on the lru).
5624  *     For now such a page is charged like a regular page would be, as for all
5625  *     intents and purposes it is just special memory taking the place of a
5626  *     regular page.
5627  *
5628  *     See Documentation/vm/hmm.rst and include/linux/hmm.h
5629  *
5630  * Called with pte lock held.
5631  */
5632
5633 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5634                 unsigned long addr, pte_t ptent, union mc_target *target)
5635 {
5636         struct page *page = NULL;
5637         enum mc_target_type ret = MC_TARGET_NONE;
5638         swp_entry_t ent = { .val = 0 };
5639
5640         if (pte_present(ptent))
5641                 page = mc_handle_present_pte(vma, addr, ptent);
5642         else if (is_swap_pte(ptent))
5643                 page = mc_handle_swap_pte(vma, ptent, &ent);
5644         else if (pte_none(ptent))
5645                 page = mc_handle_file_pte(vma, addr, ptent);
5646
5647         if (!page && !ent.val)
5648                 return ret;
5649         if (page) {
5650                 /*
5651                  * Only do a loose check without serialization.
5652                  * mem_cgroup_move_account() checks whether the page is
5653                  * valid under LRU exclusion.
5654                  */
5655                 if (page_memcg(page) == mc.from) {
5656                         ret = MC_TARGET_PAGE;
5657                         if (is_device_private_page(page))
5658                                 ret = MC_TARGET_DEVICE;
5659                         if (target)
5660                                 target->page = page;
5661                 }
5662                 if (!ret || !target)
5663                         put_page(page);
5664         }
5665         /*
5666          * There is a swap entry and a page doesn't exist or isn't charged.
5667          * But we cannot move a tail-page in a THP.
5668          */
5669         if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5670             mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5671                 ret = MC_TARGET_SWAP;
5672                 if (target)
5673                         target->ent = ent;
5674         }
5675         return ret;
5676 }
5677
5678 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5679 /*
5680  * We don't consider PMD mapped swapping or file mapped pages because THP does
5681  * not support them for now.
5682  * Caller should make sure that pmd_trans_huge(pmd) is true.
5683  */
5684 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5685                 unsigned long addr, pmd_t pmd, union mc_target *target)
5686 {
5687         struct page *page = NULL;
5688         enum mc_target_type ret = MC_TARGET_NONE;
5689
5690         if (unlikely(is_swap_pmd(pmd))) {
5691                 VM_BUG_ON(thp_migration_supported() &&
5692                                   !is_pmd_migration_entry(pmd));
5693                 return ret;
5694         }
5695         page = pmd_page(pmd);
5696         VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5697         if (!(mc.flags & MOVE_ANON))
5698                 return ret;
5699         if (page_memcg(page) == mc.from) {
5700                 ret = MC_TARGET_PAGE;
5701                 if (target) {
5702                         get_page(page);
5703                         target->page = page;
5704                 }
5705         }
5706         return ret;
5707 }
5708 #else
5709 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5710                 unsigned long addr, pmd_t pmd, union mc_target *target)
5711 {
5712         return MC_TARGET_NONE;
5713 }
5714 #endif
5715
5716 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5717                                         unsigned long addr, unsigned long end,
5718                                         struct mm_walk *walk)
5719 {
5720         struct vm_area_struct *vma = walk->vma;
5721         pte_t *pte;
5722         spinlock_t *ptl;
5723
5724         ptl = pmd_trans_huge_lock(pmd, vma);
5725         if (ptl) {
5726                 /*
5727                  * Note there cannot be MC_TARGET_DEVICE for now, as we do
5728                  * not support transparent huge pages with
5729                  * MEMORY_DEVICE_PRIVATE, but this might change.
5730                  */
5731                 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5732                         mc.precharge += HPAGE_PMD_NR;
5733                 spin_unlock(ptl);
5734                 return 0;
5735         }
5736
5737         if (pmd_trans_unstable(pmd))
5738                 return 0;
5739         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5740         for (; addr != end; pte++, addr += PAGE_SIZE)
5741                 if (get_mctgt_type(vma, addr, *pte, NULL))
5742                         mc.precharge++; /* increment precharge temporarily */
5743         pte_unmap_unlock(pte - 1, ptl);
5744         cond_resched();
5745
5746         return 0;
5747 }
5748
5749 static const struct mm_walk_ops precharge_walk_ops = {
5750         .pmd_entry      = mem_cgroup_count_precharge_pte_range,
5751 };
5752
5753 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5754 {
5755         unsigned long precharge;
5756
5757         mmap_read_lock(mm);
5758         walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5759         mmap_read_unlock(mm);
5760
5761         precharge = mc.precharge;
5762         mc.precharge = 0;
5763
5764         return precharge;
5765 }
5766
5767 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5768 {
5769         unsigned long precharge = mem_cgroup_count_precharge(mm);
5770
5771         VM_BUG_ON(mc.moving_task);
5772         mc.moving_task = current;
5773         return mem_cgroup_do_precharge(precharge);
5774 }
5775
5776 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5777 static void __mem_cgroup_clear_mc(void)
5778 {
5779         struct mem_cgroup *from = mc.from;
5780         struct mem_cgroup *to = mc.to;
5781
5782         /* we must uncharge all the leftover precharges from mc.to */
5783         if (mc.precharge) {
5784                 cancel_charge(mc.to, mc.precharge);
5785                 mc.precharge = 0;
5786         }
5787         /*
5788          * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5789          * we must uncharge here.
5790          */
5791         if (mc.moved_charge) {
5792                 cancel_charge(mc.from, mc.moved_charge);
5793                 mc.moved_charge = 0;
5794         }
5795         /* we must fixup refcnts and charges */
5796         if (mc.moved_swap) {
5797                 /* uncharge swap account from the old cgroup */
5798                 if (!mem_cgroup_is_root(mc.from))
5799                         page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5800
5801                 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5802
5803                 /*
5804                  * we charged both to->memory and to->memsw, so we
5805                  * should uncharge to->memory.
5806                  */
5807                 if (!mem_cgroup_is_root(mc.to))
5808                         page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5809
5810                 mc.moved_swap = 0;
5811         }
5812         memcg_oom_recover(from);
5813         memcg_oom_recover(to);
5814         wake_up_all(&mc.waitq);
5815 }
5816
5817 static void mem_cgroup_clear_mc(void)
5818 {
5819         struct mm_struct *mm = mc.mm;
5820
5821         /*
5822          * we must clear moving_task before waking up waiters at the end of
5823          * task migration.
5824          */
5825         mc.moving_task = NULL;
5826         __mem_cgroup_clear_mc();
5827         spin_lock(&mc.lock);
5828         mc.from = NULL;
5829         mc.to = NULL;
5830         mc.mm = NULL;
5831         spin_unlock(&mc.lock);
5832
5833         mmput(mm);
5834 }
5835
5836 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5837 {
5838         struct cgroup_subsys_state *css;
5839         struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5840         struct mem_cgroup *from;
5841         struct task_struct *leader, *p;
5842         struct mm_struct *mm;
5843         unsigned long move_flags;
5844         int ret = 0;
5845
5846         /* charge immigration isn't supported on the default hierarchy */
5847         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5848                 return 0;
5849
5850         /*
5851          * Multi-process migrations only happen on the default hierarchy
5852          * where charge immigration is not used.  Perform charge
5853          * immigration if @tset contains a leader and whine if there are
5854          * multiple.
5855          */
5856         p = NULL;
5857         cgroup_taskset_for_each_leader(leader, css, tset) {
5858                 WARN_ON_ONCE(p);
5859                 p = leader;
5860                 memcg = mem_cgroup_from_css(css);
5861         }
5862         if (!p)
5863                 return 0;
5864
5865         /*
5866          * We are now committed to this value whatever it is. Changes in this
5867          * tunable will only affect upcoming migrations, not the current one.
5868          * So we need to save it, and keep using it for this migration.
5869          */
5870         move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5871         if (!move_flags)
5872                 return 0;
5873
5874         from = mem_cgroup_from_task(p);
5875
5876         VM_BUG_ON(from == memcg);
5877
5878         mm = get_task_mm(p);
5879         if (!mm)
5880                 return 0;
5881         /* We move charges only when we move the owner of the mm */
5882         if (mm->owner == p) {
5883                 VM_BUG_ON(mc.from);
5884                 VM_BUG_ON(mc.to);
5885                 VM_BUG_ON(mc.precharge);
5886                 VM_BUG_ON(mc.moved_charge);
5887                 VM_BUG_ON(mc.moved_swap);
5888
5889                 spin_lock(&mc.lock);
5890                 mc.mm = mm;
5891                 mc.from = from;
5892                 mc.to = memcg;
5893                 mc.flags = move_flags;
5894                 spin_unlock(&mc.lock);
5895                 /* We set mc.moving_task later */
5896
5897                 ret = mem_cgroup_precharge_mc(mm);
5898                 if (ret)
5899                         mem_cgroup_clear_mc();
5900         } else {
5901                 mmput(mm);
5902         }
5903         return ret;
5904 }
5905
5906 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5907 {
5908         if (mc.to)
5909                 mem_cgroup_clear_mc();
5910 }
5911
5912 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5913                                 unsigned long addr, unsigned long end,
5914                                 struct mm_walk *walk)
5915 {
5916         int ret = 0;
5917         struct vm_area_struct *vma = walk->vma;
5918         pte_t *pte;
5919         spinlock_t *ptl;
5920         enum mc_target_type target_type;
5921         union mc_target target;
5922         struct page *page;
5923
5924         ptl = pmd_trans_huge_lock(pmd, vma);
5925         if (ptl) {
5926                 if (mc.precharge < HPAGE_PMD_NR) {
5927                         spin_unlock(ptl);
5928                         return 0;
5929                 }
5930                 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
5931                 if (target_type == MC_TARGET_PAGE) {
5932                         page = target.page;
5933                         if (!isolate_lru_page(page)) {
5934                                 if (!mem_cgroup_move_account(page, true,
5935                                                              mc.from, mc.to)) {
5936                                         mc.precharge -= HPAGE_PMD_NR;
5937                                         mc.moved_charge += HPAGE_PMD_NR;
5938                                 }
5939                                 putback_lru_page(page);
5940                         }
5941                         put_page(page);
5942                 } else if (target_type == MC_TARGET_DEVICE) {
5943                         page = target.page;
5944                         if (!mem_cgroup_move_account(page, true,
5945                                                      mc.from, mc.to)) {
5946                                 mc.precharge -= HPAGE_PMD_NR;
5947                                 mc.moved_charge += HPAGE_PMD_NR;
5948                         }
5949                         put_page(page);
5950                 }
5951                 spin_unlock(ptl);
5952                 return 0;
5953         }
5954
5955         if (pmd_trans_unstable(pmd))
5956                 return 0;
5957 retry:
5958         pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5959         for (; addr != end; addr += PAGE_SIZE) {
5960                 pte_t ptent = *(pte++);
5961                 bool device = false;
5962                 swp_entry_t ent;
5963
5964                 if (!mc.precharge)
5965                         break;
5966
5967                 switch (get_mctgt_type(vma, addr, ptent, &target)) {
5968                 case MC_TARGET_DEVICE:
5969                         device = true;
5970                         fallthrough;
5971                 case MC_TARGET_PAGE:
5972                         page = target.page;
5973                         /*
5974                          * We can have a part of the split pmd here. Moving it
5975                          * can be done but it would be too convoluted so simply
5976                          * ignore such a partial THP and keep it in original
5977                          * memcg. There should be somebody mapping the head.
5978                          */
5979                         if (PageTransCompound(page))
5980                                 goto put;
5981                         if (!device && isolate_lru_page(page))
5982                                 goto put;
5983                         if (!mem_cgroup_move_account(page, false,
5984                                                 mc.from, mc.to)) {
5985                                 mc.precharge--;
5986                                 /* we uncharge from mc.from later. */
5987                                 mc.moved_charge++;
5988                         }
5989                         if (!device)
5990                                 putback_lru_page(page);
5991 put:                    /* get_mctgt_type() gets the page */
5992                         put_page(page);
5993                         break;
5994                 case MC_TARGET_SWAP:
5995                         ent = target.ent;
5996                         if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
5997                                 mc.precharge--;
5998                                 mem_cgroup_id_get_many(mc.to, 1);
5999                                 /* we fixup other refcnts and charges later. */
6000                                 mc.moved_swap++;
6001                         }
6002                         break;
6003                 default:
6004                         break;
6005                 }
6006         }
6007         pte_unmap_unlock(pte - 1, ptl);
6008         cond_resched();
6009
6010         if (addr != end) {
6011                 /*
6012                  * We have consumed all precharges we got in can_attach().
6013                  * We try to charge one by one, but don't do any additional
6014                  * charges to mc.to if we have failed to charge once in the
6015                  * attach() phase.
6016                  */
6017                 ret = mem_cgroup_do_precharge(1);
6018                 if (!ret)
6019                         goto retry;
6020         }
6021
6022         return ret;
6023 }
6024
6025 static const struct mm_walk_ops charge_walk_ops = {
6026         .pmd_entry      = mem_cgroup_move_charge_pte_range,
6027 };
6028
6029 static void mem_cgroup_move_charge(void)
6030 {
6031         lru_add_drain_all();
6032         /*
6033          * Signal lock_page_memcg() to take the memcg's move_lock
6034          * while we're moving its pages to another memcg. Then wait
6035          * for already started RCU-only updates to finish.
6036          */
6037         atomic_inc(&mc.from->moving_account);
6038         synchronize_rcu();
6039 retry:
6040         if (unlikely(!mmap_read_trylock(mc.mm))) {
6041                 /*
6042                  * Someone holding the mmap_lock might be waiting on the
6043                  * waitq. So we cancel all extra charges, wake up all waiters,
6044                  * and retry. Because we cancel precharges, we might not be able
6045                  * to move enough charges, but moving charge is a best-effort
6046                  * feature anyway, so it wouldn't be a big problem.
6047                  */
6048                 __mem_cgroup_clear_mc();
6049                 cond_resched();
6050                 goto retry;
6051         }
6052         /*
6053          * When we have consumed all precharges and failed to do an
6054          * additional charge, the page walk just aborts.
6055          */
6056         walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6057                         NULL);
6058
6059         mmap_read_unlock(mc.mm);
6060         atomic_dec(&mc.from->moving_account);
6061 }
6062
6063 static void mem_cgroup_move_task(void)
6064 {
6065         if (mc.to) {
6066                 mem_cgroup_move_charge();
6067                 mem_cgroup_clear_mc();
6068         }
6069 }
6070 #else   /* !CONFIG_MMU */
6071 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6072 {
6073         return 0;
6074 }
6075 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6076 {
6077 }
6078 static void mem_cgroup_move_task(void)
6079 {
6080 }
6081 #endif
6082
6083 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6084 {
6085         if (value == PAGE_COUNTER_MAX)
6086                 seq_puts(m, "max\n");
6087         else
6088                 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6089
6090         return 0;
6091 }
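
/*
 * Illustrative note (added commentary, not from the original source): with a
 * 4K PAGE_SIZE, a tunable set to 25600 pages prints as "104857600\n" (bytes),
 * while an unset tunable (PAGE_COUNTER_MAX) prints as "max\n". A minimal
 * sketch of the round trip through the cgroupfs files below:
 *
 *	echo 100M > memory.low	# parsed by page_counter_memparse()
 *	cat memory.low		# prints 104857600
 */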
6092
6093 static u64 memory_current_read(struct cgroup_subsys_state *css,
6094                                struct cftype *cft)
6095 {
6096         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6097
6098         return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6099 }
6100
6101 static int memory_min_show(struct seq_file *m, void *v)
6102 {
6103         return seq_puts_memcg_tunable(m,
6104                 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6105 }
6106
6107 static ssize_t memory_min_write(struct kernfs_open_file *of,
6108                                 char *buf, size_t nbytes, loff_t off)
6109 {
6110         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6111         unsigned long min;
6112         int err;
6113
6114         buf = strstrip(buf);
6115         err = page_counter_memparse(buf, "max", &min);
6116         if (err)
6117                 return err;
6118
6119         page_counter_set_min(&memcg->memory, min);
6120
6121         return nbytes;
6122 }
6123
6124 static int memory_low_show(struct seq_file *m, void *v)
6125 {
6126         return seq_puts_memcg_tunable(m,
6127                 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6128 }
6129
6130 static ssize_t memory_low_write(struct kernfs_open_file *of,
6131                                 char *buf, size_t nbytes, loff_t off)
6132 {
6133         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6134         unsigned long low;
6135         int err;
6136
6137         buf = strstrip(buf);
6138         err = page_counter_memparse(buf, "max", &low);
6139         if (err)
6140                 return err;
6141
6142         page_counter_set_low(&memcg->memory, low);
6143
6144         return nbytes;
6145 }
6146
6147 static int memory_high_show(struct seq_file *m, void *v)
6148 {
6149         return seq_puts_memcg_tunable(m,
6150                 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6151 }
6152
6153 static ssize_t memory_high_write(struct kernfs_open_file *of,
6154                                  char *buf, size_t nbytes, loff_t off)
6155 {
6156         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6157         unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6158         bool drained = false;
6159         unsigned long high;
6160         int err;
6161
6162         buf = strstrip(buf);
6163         err = page_counter_memparse(buf, "max", &high);
6164         if (err)
6165                 return err;
6166
6167         page_counter_set_high(&memcg->memory, high);
6168
6169         for (;;) {
6170                 unsigned long nr_pages = page_counter_read(&memcg->memory);
6171                 unsigned long reclaimed;
6172
6173                 if (nr_pages <= high)
6174                         break;
6175
6176                 if (signal_pending(current))
6177                         break;
6178
6179                 if (!drained) {
6180                         drain_all_stock(memcg);
6181                         drained = true;
6182                         continue;
6183                 }
6184
6185                 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6186                                                          GFP_KERNEL, true);
6187
6188                 if (!reclaimed && !nr_retries--)
6189                         break;
6190         }
6191
6192         memcg_wb_domain_size_changed(memcg);
6193         return nbytes;
6194 }
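
/*
 * Hedged usage sketch (added note): memory.high is a throttling limit, not a
 * hard one. Writing it synchronously reclaims the excess, but the loop above
 * gives up after MAX_RECLAIM_RETRIES failed reclaim attempts or on a pending
 * signal rather than looping forever:
 *
 *	echo 512M > memory.high		# reclaim down toward 512M
 *	echo max > memory.high		# effectively removes the limit
 */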
6195
6196 static int memory_max_show(struct seq_file *m, void *v)
6197 {
6198         return seq_puts_memcg_tunable(m,
6199                 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6200 }
6201
6202 static ssize_t memory_max_write(struct kernfs_open_file *of,
6203                                 char *buf, size_t nbytes, loff_t off)
6204 {
6205         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6206         unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6207         bool drained = false;
6208         unsigned long max;
6209         int err;
6210
6211         buf = strstrip(buf);
6212         err = page_counter_memparse(buf, "max", &max);
6213         if (err)
6214                 return err;
6215
6216         xchg(&memcg->memory.max, max);
6217
6218         for (;;) {
6219                 unsigned long nr_pages = page_counter_read(&memcg->memory);
6220
6221                 if (nr_pages <= max)
6222                         break;
6223
6224                 if (signal_pending(current))
6225                         break;
6226
6227                 if (!drained) {
6228                         drain_all_stock(memcg);
6229                         drained = true;
6230                         continue;
6231                 }
6232
6233                 if (nr_reclaims) {
6234                         if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6235                                                           GFP_KERNEL, true))
6236                                 nr_reclaims--;
6237                         continue;
6238                 }
6239
6240                 memcg_memory_event(memcg, MEMCG_OOM);
6241                 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6242                         break;
6243         }
6244
6245         memcg_wb_domain_size_changed(memcg);
6246         return nbytes;
6247 }
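
/*
 * Hedged usage sketch (added note): unlike memory.high, memory.max is a hard
 * limit. Once the reclaim retries are exhausted, the loop above raises
 * MEMCG_OOM and invokes the memcg OOM killer until usage fits or no further
 * victims can be found:
 *
 *	echo 1G > memory.max	# reclaim, then OOM-kill if still over
 */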
6248
6249 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6250 {
6251         seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6252         seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6253         seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6254         seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6255         seq_printf(m, "oom_kill %lu\n",
6256                    atomic_long_read(&events[MEMCG_OOM_KILL]));
6257         seq_printf(m, "oom_group_kill %lu\n",
6258                    atomic_long_read(&events[MEMCG_OOM_GROUP_KILL]));
6259 }
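
/*
 * Example output (illustrative counts) as produced by the seq_printf() calls
 * above, one "<event> <count>" pair per line:
 *
 *	low 0
 *	high 42
 *	max 7
 *	oom 1
 *	oom_kill 1
 *	oom_group_kill 0
 */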
6260
6261 static int memory_events_show(struct seq_file *m, void *v)
6262 {
6263         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6264
6265         __memory_events_show(m, memcg->memory_events);
6266         return 0;
6267 }
6268
6269 static int memory_events_local_show(struct seq_file *m, void *v)
6270 {
6271         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6272
6273         __memory_events_show(m, memcg->memory_events_local);
6274         return 0;
6275 }
6276
6277 static int memory_stat_show(struct seq_file *m, void *v)
6278 {
6279         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6280         char *buf;
6281
6282         buf = memory_stat_format(memcg);
6283         if (!buf)
6284                 return -ENOMEM;
6285         seq_puts(m, buf);
6286         kfree(buf);
6287         return 0;
6288 }
6289
6290 #ifdef CONFIG_NUMA
6291 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6292                                                      int item)
6293 {
6294         return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
6295 }
6296
6297 static int memory_numa_stat_show(struct seq_file *m, void *v)
6298 {
6299         int i;
6300         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6301
6302         mem_cgroup_flush_stats();
6303
6304         for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6305                 int nid;
6306
6307                 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6308                         continue;
6309
6310                 seq_printf(m, "%s", memory_stats[i].name);
6311                 for_each_node_state(nid, N_MEMORY) {
6312                         u64 size;
6313                         struct lruvec *lruvec;
6314
6315                         lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6316                         size = lruvec_page_state_output(lruvec,
6317                                                         memory_stats[i].idx);
6318                         seq_printf(m, " N%d=%llu", nid, size);
6319                 }
6320                 seq_putc(m, '\n');
6321         }
6322
6323         return 0;
6324 }
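
/*
 * Example memory.numa_stat output (illustrative values): one row per
 * node-aware stat, with a " N<nid>=<bytes>" column for each N_MEMORY node:
 *
 *	anon N0=1234880 N1=0
 *	file N0=65536 N1=4096
 */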
6325 #endif
6326
6327 static int memory_oom_group_show(struct seq_file *m, void *v)
6328 {
6329         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6330
6331         seq_printf(m, "%d\n", memcg->oom_group);
6332
6333         return 0;
6334 }
6335
6336 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6337                                       char *buf, size_t nbytes, loff_t off)
6338 {
6339         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6340         int ret, oom_group;
6341
6342         buf = strstrip(buf);
6343         if (!buf)
6344                 return -EINVAL;
6345
6346         ret = kstrtoint(buf, 0, &oom_group);
6347         if (ret)
6348                 return ret;
6349
6350         if (oom_group != 0 && oom_group != 1)
6351                 return -EINVAL;
6352
6353         memcg->oom_group = oom_group;
6354
6355         return nbytes;
6356 }
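
/*
 * Hedged usage sketch (added note): memory.oom.group accepts only 0 or 1.
 * Setting it tells the OOM killer to treat the cgroup as an indivisible
 * workload and kill it as a whole:
 *
 *	echo 1 > memory.oom.group
 */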
6357
6358 static struct cftype memory_files[] = {
6359         {
6360                 .name = "current",
6361                 .flags = CFTYPE_NOT_ON_ROOT,
6362                 .read_u64 = memory_current_read,
6363         },
6364         {
6365                 .name = "min",
6366                 .flags = CFTYPE_NOT_ON_ROOT,
6367                 .seq_show = memory_min_show,
6368                 .write = memory_min_write,
6369         },
6370         {
6371                 .name = "low",
6372                 .flags = CFTYPE_NOT_ON_ROOT,
6373                 .seq_show = memory_low_show,
6374                 .write = memory_low_write,
6375         },
6376         {
6377                 .name = "high",
6378                 .flags = CFTYPE_NOT_ON_ROOT,
6379                 .seq_show = memory_high_show,
6380                 .write = memory_high_write,
6381         },
6382         {
6383                 .name = "max",
6384                 .flags = CFTYPE_NOT_ON_ROOT,
6385                 .seq_show = memory_max_show,
6386                 .write = memory_max_write,
6387         },
6388         {
6389                 .name = "events",
6390                 .flags = CFTYPE_NOT_ON_ROOT,
6391                 .file_offset = offsetof(struct mem_cgroup, events_file),
6392                 .seq_show = memory_events_show,
6393         },
6394         {
6395                 .name = "events.local",
6396                 .flags = CFTYPE_NOT_ON_ROOT,
6397                 .file_offset = offsetof(struct mem_cgroup, events_local_file),
6398                 .seq_show = memory_events_local_show,
6399         },
6400         {
6401                 .name = "stat",
6402                 .seq_show = memory_stat_show,
6403         },
6404 #ifdef CONFIG_NUMA
6405         {
6406                 .name = "numa_stat",
6407                 .seq_show = memory_numa_stat_show,
6408         },
6409 #endif
6410         {
6411                 .name = "oom.group",
6412                 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6413                 .seq_show = memory_oom_group_show,
6414                 .write = memory_oom_group_write,
6415         },
6416         { }     /* terminate */
6417 };
6418
6419 struct cgroup_subsys memory_cgrp_subsys = {
6420         .css_alloc = mem_cgroup_css_alloc,
6421         .css_online = mem_cgroup_css_online,
6422         .css_offline = mem_cgroup_css_offline,
6423         .css_released = mem_cgroup_css_released,
6424         .css_free = mem_cgroup_css_free,
6425         .css_reset = mem_cgroup_css_reset,
6426         .css_rstat_flush = mem_cgroup_css_rstat_flush,
6427         .can_attach = mem_cgroup_can_attach,
6428         .cancel_attach = mem_cgroup_cancel_attach,
6429         .post_attach = mem_cgroup_move_task,
6430         .dfl_cftypes = memory_files,
6431         .legacy_cftypes = mem_cgroup_legacy_files,
6432         .early_init = 0,
6433 };
6434
6435 /*
6436  * This function calculates an individual cgroup's effective
6437  * protection which is derived from its own memory.min/low, its
6438  * parent's and siblings' settings, as well as the actual memory
6439  * distribution in the tree.
6440  *
6441  * The following rules apply to the effective protection values:
6442  *
6443  * 1. At the first level of reclaim, effective protection is equal to
6444  *    the declared protection in memory.min and memory.low.
6445  *
6446  * 2. To enable safe delegation of the protection configuration, at
6447  *    subsequent levels the effective protection is capped to the
6448  *    parent's effective protection.
6449  *
6450  * 3. To make complex and dynamic subtrees easier to configure, the
6451  *    user is allowed to overcommit the declared protection at a given
6452  *    level. If that is the case, the parent's effective protection is
6453  *    distributed to the children in proportion to how much protection
6454  *    they have declared and how much of it they are utilizing.
6455  *
6456  *    This makes distribution proportional, but also work-conserving:
6457  *    if one cgroup claims much more protection than the memory it
6458  *    uses, the unused remainder is available to its siblings.
6459  *
6460  * 4. Conversely, when the declared protection is undercommitted at a
6461  *    given level, the distribution of the larger parental protection
6462  *    budget is NOT proportional. A cgroup's protection from a sibling
6463  *    is capped to its own memory.min/low setting.
6464  *
6465  * 5. However, to allow protecting recursive subtrees from each other
6466  *    without having to declare each individual cgroup's fixed share
6467  *    of the ancestor's claim to protection, any unutilized -
6468  *    "floating" - protection from up the tree is distributed in
6469  *    proportion to each cgroup's *usage*. This makes the protection
6470  *    neutral wrt sibling cgroups and lets them compete freely over
6471  *    the shared parental protection budget, but it protects the
6472  *    subtree as a whole from neighboring subtrees.
6473  *
6474  * Note that 4. and 5. are not in conflict: 4. is about protecting
6475  * against immediate siblings whereas 5. is about protecting against
6476  * neighboring subtrees.
6477  */
6478 static unsigned long effective_protection(unsigned long usage,
6479                                           unsigned long parent_usage,
6480                                           unsigned long setting,
6481                                           unsigned long parent_effective,
6482                                           unsigned long siblings_protected)
6483 {
6484         unsigned long protected;
6485         unsigned long ep;
6486
6487         protected = min(usage, setting);
6488         /*
6489          * If all cgroups at this level combined claim and use more
6490          * protection than what the parent affords them, distribute
6491          * shares in proportion to utilization.
6492          *
6493          * We are using actual utilization rather than the statically
6494          * claimed protection in order to be work-conserving: claimed
6495          * but unused protection is available to siblings that would
6496          * otherwise get a smaller chunk than what they claimed.
6497          */
6498         if (siblings_protected > parent_effective)
6499                 return protected * parent_effective / siblings_protected;
6500
6501         /*
6502          * Ok, utilized protection of all children is within what the
6503          * parent affords them, so we know whatever this child claims
6504          * and utilizes is effectively protected.
6505          *
6506          * If there is unprotected usage beyond this value, reclaim
6507          * will apply pressure in proportion to that amount.
6508          *
6509          * If there is unutilized protection, the cgroup will be fully
6510          * shielded from reclaim, but we do return a smaller value for
6511          * protection than what the group could enjoy in theory. This
6512          * is okay. With the overcommit distribution above, effective
6513          * protection is always dependent on how memory is actually
6514          * consumed among the siblings anyway.
6515          */
6516         ep = protected;
6517
6518         /*
6519          * If the children aren't claiming (all of) the protection
6520          * afforded to them by the parent, distribute the remainder in
6521          * proportion to the (unprotected) memory of each cgroup. That
6522          * way, cgroups that aren't explicitly prioritized wrt each
6523          * other compete freely over the allowance, but they are
6524          * collectively protected from neighboring trees.
6525          *
6526          * We're using unprotected memory for the weight so that if
6527          * some cgroups DO claim explicit protection, we don't protect
6528          * the same bytes twice.
6529          *
6530          * Check both usage and parent_usage against the respective
6531          * protected values. One should imply the other, but they
6532          * aren't read atomically - make sure the division is sane.
6533          */
6534         if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6535                 return ep;
6536         if (parent_effective > siblings_protected &&
6537             parent_usage > siblings_protected &&
6538             usage > protected) {
6539                 unsigned long unclaimed;
6540
6541                 unclaimed = parent_effective - siblings_protected;
6542                 unclaimed *= usage - protected;
6543                 unclaimed /= parent_usage - siblings_protected;
6544
6545                 ep += unclaimed;
6546         }
6547
6548         return ep;
6549 }
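
/*
 * Worked example (added for illustration, numbers are hypothetical): suppose
 * parent_effective = 100 pages but the children collectively claim and use
 * siblings_protected = 200 pages. A child with
 * protected = min(usage, setting) = 50 then receives
 * 50 * 100 / 200 = 25 pages of effective protection - the overcommitted
 * budget is split in proportion to utilization, per rule 3 above.
 */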
6550
6551 /**
6552  * mem_cgroup_calculate_protection - check if memory consumption is in the normal range
6553  * @root: the top ancestor of the sub-tree being checked
6554  * @memcg: the memory cgroup to check
6555  *
6556  * WARNING: This function is not stateless! It can only be used as part
6557  *          of a top-down tree iteration, not for isolated queries.
6558  */
6559 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6560                                      struct mem_cgroup *memcg)
6561 {
6562         unsigned long usage, parent_usage;
6563         struct mem_cgroup *parent;
6564
6565         if (mem_cgroup_disabled())
6566                 return;
6567
6568         if (!root)
6569                 root = root_mem_cgroup;
6570
6571         /*
6572          * Effective values of the reclaim targets are ignored so they
6573          * can be stale. Have a look at mem_cgroup_protection for more
6574          * details.
6575          * TODO: calculation should be more robust so that we do not need
6576          * that special casing.
6577          */
6578         if (memcg == root)
6579                 return;
6580
6581         usage = page_counter_read(&memcg->memory);
6582         if (!usage)
6583                 return;
6584
6585         parent = parent_mem_cgroup(memcg);
6586         /* No parent means non-hierarchical mode on a v1 memcg */
6587         if (!parent)
6588                 return;
6589
6590         if (parent == root) {
6591                 memcg->memory.emin = READ_ONCE(memcg->memory.min);
6592                 memcg->memory.elow = READ_ONCE(memcg->memory.low);
6593                 return;
6594         }
6595
6596         parent_usage = page_counter_read(&parent->memory);
6597
6598         WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6599                         READ_ONCE(memcg->memory.min),
6600                         READ_ONCE(parent->memory.emin),
6601                         atomic_long_read(&parent->memory.children_min_usage)));
6602
6603         WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6604                         READ_ONCE(memcg->memory.low),
6605                         READ_ONCE(parent->memory.elow),
6606                         atomic_long_read(&parent->memory.children_low_usage)));
6607 }
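
/*
 * Hedged usage sketch (added note, not from the original source): because
 * emin/elow are derived from the parent's effective values, callers must
 * walk the tree top-down, roughly as reclaim does:
 *
 *	memcg = mem_cgroup_iter(target, NULL, NULL);
 *	do {
 *		mem_cgroup_calculate_protection(target, memcg);
 *		... then consult memcg->memory.emin / elow ...
 *	} while ((memcg = mem_cgroup_iter(target, memcg, NULL)));
 */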
6608
6609 static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg,
6610                         gfp_t gfp)
6611 {
6612         long nr_pages = folio_nr_pages(folio);
6613         int ret;
6614
6615         ret = try_charge(memcg, gfp, nr_pages);
6616         if (ret)
6617                 goto out;
6618
6619         css_get(&memcg->css);
6620         commit_charge(folio, memcg);
6621
6622         local_irq_disable();
6623         mem_cgroup_charge_statistics(memcg, nr_pages);
6624         memcg_check_events(memcg, folio_nid(folio));
6625         local_irq_enable();
6626 out:
6627         return ret;
6628 }
6629
6630 int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp)
6631 {
6632         struct mem_cgroup *memcg;
6633         int ret;
6634
6635         memcg = get_mem_cgroup_from_mm(mm);
6636         ret = charge_memcg(folio, memcg, gfp);
6637         css_put(&memcg->css);
6638
6639         return ret;
6640 }
6641
6642 /**
6643  * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin
6644  * @page: page to charge
6645  * @mm: mm context of the victim
6646  * @gfp: reclaim mode
6647  * @entry: swap entry for which the page is allocated
6648  *
6649  * This function charges a page allocated for swapin. Please call this before
6650  * adding the page to the swapcache.
6651  *
6652  * Returns 0 on success. Otherwise, an error code is returned.
6653  */
6654 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
6655                                   gfp_t gfp, swp_entry_t entry)
6656 {
6657         struct folio *folio = page_folio(page);
6658         struct mem_cgroup *memcg;
6659         unsigned short id;
6660         int ret;
6661
6662         if (mem_cgroup_disabled())
6663                 return 0;
6664
6665         id = lookup_swap_cgroup_id(entry);
6666         rcu_read_lock();
6667         memcg = mem_cgroup_from_id(id);
6668         if (!memcg || !css_tryget_online(&memcg->css))
6669                 memcg = get_mem_cgroup_from_mm(mm);
6670         rcu_read_unlock();
6671
6672         ret = charge_memcg(folio, memcg, gfp);
6673
6674         css_put(&memcg->css);
6675         return ret;
6676 }
6677
6678 /*
6679  * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
6680  * @entry: swap entry for which the page is charged
6681  *
6682  * Call this function after successfully adding the charged page to swapcache.
6683  *
6684  * Note: This function assumes the page for which swap slot is being uncharged
6685  * Note: This function assumes the page for which the swap slot is being
6686  * uncharged is an order-0 page.
6687 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
6688 {
6689         /*
6690          * Cgroup1's unified memory+swap counter has been charged with the
6691          * new swapcache page, finish the transfer by uncharging the swap
6692          * slot. The swap slot would also get uncharged when it dies, but
6693          * it can stick around indefinitely and we'd count the page twice
6694          * the entire time.
6695          *
6696          * Cgroup2 has separate resource counters for memory and swap,
6697          * so this is a non-issue here. Memory and swap charge lifetimes
6698          * correspond 1:1 to page and swap slot lifetimes: we charge the
6699          * page to memory here, and uncharge swap when the slot is freed.
6700          */
6701         if (!mem_cgroup_disabled() && do_memsw_account()) {
6702                 /*
6703                  * The swap entry might not get freed for a long time,
6704                  * let's not wait for it.  The page already received a
6705                  * memory+swap charge, drop the swap entry duplicate.
6706                  */
6707                 mem_cgroup_uncharge_swap(entry, 1);
6708         }
6709 }
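
/*
 * Hedged call-order sketch (added note), following the two comments above and
 * roughly what __read_swap_cache_async() does: charge the new page before
 * inserting it into the swapcache, then drop the duplicate swap charge once
 * the insertion has succeeded:
 *
 *	err = mem_cgroup_swapin_charge_page(page, mm, gfp, entry);
 *	if (err)
 *		goto fail;
 *	err = add_to_swap_cache(page, entry, gfp, &shadow);
 *	if (!err)
 *		mem_cgroup_swapin_uncharge_swap(entry);
 */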
6710
6711 struct uncharge_gather {
6712         struct mem_cgroup *memcg;
6713         unsigned long nr_memory;
6714         unsigned long pgpgout;
6715         unsigned long nr_kmem;
6716         int nid;
6717 };
6718
6719 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6720 {
6721         memset(ug, 0, sizeof(*ug));
6722 }
6723
6724 static void uncharge_batch(const struct uncharge_gather *ug)
6725 {
6726         unsigned long flags;
6727
6728         if (ug->nr_memory) {
6729                 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
6730                 if (do_memsw_account())
6731                         page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
6732                 if (ug->nr_kmem)
6733                         memcg_account_kmem(ug->memcg, -ug->nr_kmem);
6734                 memcg_oom_recover(ug->memcg);
6735         }
6736
6737         local_irq_save(flags);
6738         __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6739         __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
6740         memcg_check_events(ug->memcg, ug->nid);
6741         local_irq_restore(flags);
6742
6743         /* drop reference from uncharge_folio */
6744         css_put(&ug->memcg->css);
6745 }
6746
6747 static void uncharge_folio(struct folio *folio, struct uncharge_gather *ug)
6748 {
6749         long nr_pages;
6750         struct mem_cgroup *memcg;
6751         struct obj_cgroup *objcg;
6752
6753         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
6754
6755         /*
6756          * Nobody should be changing or seriously looking at
6757          * the folio's memcg or objcg at this point; we have fully
6758          * exclusive access to the folio.
6759          */
6760         if (folio_memcg_kmem(folio)) {
6761                 objcg = __folio_objcg(folio);
6762                 /*
6763                  * This get matches the put at the end of the function and
6764                  * kmem pages do not hold memcg references anymore.
6765                  */
6766                 memcg = get_mem_cgroup_from_objcg(objcg);
6767         } else {
6768                 memcg = __folio_memcg(folio);
6769         }
6770
6771         if (!memcg)
6772                 return;
6773
6774         if (ug->memcg != memcg) {
6775                 if (ug->memcg) {
6776                         uncharge_batch(ug);
6777                         uncharge_gather_clear(ug);
6778                 }
6779                 ug->memcg = memcg;
6780                 ug->nid = folio_nid(folio);
6781
6782                 /* pairs with css_put in uncharge_batch */
6783                 css_get(&memcg->css);
6784         }
6785
6786         nr_pages = folio_nr_pages(folio);
6787
6788         if (folio_memcg_kmem(folio)) {
6789                 ug->nr_memory += nr_pages;
6790                 ug->nr_kmem += nr_pages;
6791
6792                 folio->memcg_data = 0;
6793                 obj_cgroup_put(objcg);
6794         } else {
6795                 /* LRU pages aren't accounted at the root level */
6796                 if (!mem_cgroup_is_root(memcg))
6797                         ug->nr_memory += nr_pages;
6798                 ug->pgpgout++;
6799
6800                 folio->memcg_data = 0;
6801         }
6802
6803         css_put(&memcg->css);
6804 }
6805
6806 void __mem_cgroup_uncharge(struct folio *folio)
6807 {
6808         struct uncharge_gather ug;
6809
6810         /* Don't touch folio->lru of any random page, pre-check: */
6811         if (!folio_memcg(folio))
6812                 return;
6813
6814         uncharge_gather_clear(&ug);
6815         uncharge_folio(folio, &ug);
6816         uncharge_batch(&ug);
6817 }
6818
6819 /**
6820  * __mem_cgroup_uncharge_list - uncharge a list of pages
6821  * @page_list: list of pages to uncharge
6822  *
6823  * Uncharge a list of pages previously charged with
6824  * __mem_cgroup_charge().
6825  */
6826 void __mem_cgroup_uncharge_list(struct list_head *page_list)
6827 {
6828         struct uncharge_gather ug;
6829         struct folio *folio;
6830
6831         uncharge_gather_clear(&ug);
6832         list_for_each_entry(folio, page_list, lru)
6833                 uncharge_folio(folio, &ug);
6834         if (ug.memcg)
6835                 uncharge_batch(&ug);
6836 }
6837
6838 /**
6839  * mem_cgroup_migrate - Charge a folio's replacement.
6840  * @old: Currently circulating folio.
6841  * @new: Replacement folio.
6842  *
6843  * Charge @new as a replacement folio for @old. @old will
6844  * be uncharged upon free.
6845  *
6846  * Both folios must be locked, @new->mapping must be set up.
6847  */
6848 void mem_cgroup_migrate(struct folio *old, struct folio *new)
6849 {
6850         struct mem_cgroup *memcg;
6851         long nr_pages = folio_nr_pages(new);
6852         unsigned long flags;
6853
6854         VM_BUG_ON_FOLIO(!folio_test_locked(old), old);
6855         VM_BUG_ON_FOLIO(!folio_test_locked(new), new);
6856         VM_BUG_ON_FOLIO(folio_test_anon(old) != folio_test_anon(new), new);
6857         VM_BUG_ON_FOLIO(folio_nr_pages(old) != nr_pages, new);
6858
6859         if (mem_cgroup_disabled())
6860                 return;
6861
6862         /* Page cache replacement: new folio already charged? */
6863         if (folio_memcg(new))
6864                 return;
6865
6866         memcg = folio_memcg(old);
6867         VM_WARN_ON_ONCE_FOLIO(!memcg, old);
6868         if (!memcg)
6869                 return;
6870
6871         /* Force-charge the new folio. The old one will be freed soon. */
6872         if (!mem_cgroup_is_root(memcg)) {
6873                 page_counter_charge(&memcg->memory, nr_pages);
6874                 if (do_memsw_account())
6875                         page_counter_charge(&memcg->memsw, nr_pages);
6876         }
6877
6878         css_get(&memcg->css);
6879         commit_charge(new, memcg);
6880
6881         local_irq_save(flags);
6882         mem_cgroup_charge_statistics(memcg, nr_pages);
6883         memcg_check_events(memcg, folio_nid(new));
6884         local_irq_restore(flags);
6885 }
6886
6887 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
6888 EXPORT_SYMBOL(memcg_sockets_enabled_key);
6889
6890 void mem_cgroup_sk_alloc(struct sock *sk)
6891 {
6892         struct mem_cgroup *memcg;
6893
6894         if (!mem_cgroup_sockets_enabled)
6895                 return;
6896
6897         /* Do not associate the sock with an unrelated interrupted task's memcg. */
6898         if (!in_task())
6899                 return;
6900
6901         rcu_read_lock();
6902         memcg = mem_cgroup_from_task(current);
6903         if (memcg == root_mem_cgroup)
6904                 goto out;
6905         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
6906                 goto out;
6907         if (css_tryget(&memcg->css))
6908                 sk->sk_memcg = memcg;
6909 out:
6910         rcu_read_unlock();
6911 }
6912
6913 void mem_cgroup_sk_free(struct sock *sk)
6914 {
6915         if (sk->sk_memcg)
6916                 css_put(&sk->sk_memcg->css);
6917 }
6918
6919 /**
6920  * mem_cgroup_charge_skmem - charge socket memory
6921  * @memcg: memcg to charge
6922  * @nr_pages: number of pages to charge
6923  * @gfp_mask: reclaim mode
6924  *
6925  * Charges @nr_pages to @memcg. Returns %true if the charge fit within
6926  * @memcg's configured limit, %false if it doesn't.
6927  */
6928 bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages,
6929                              gfp_t gfp_mask)
6930 {
6931         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6932                 struct page_counter *fail;
6933
6934                 if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
6935                         memcg->tcpmem_pressure = 0;
6936                         return true;
6937                 }
6938                 memcg->tcpmem_pressure = 1;
6939                 if (gfp_mask & __GFP_NOFAIL) {
6940                         page_counter_charge(&memcg->tcpmem, nr_pages);
6941                         return true;
6942                 }
6943                 return false;
6944         }
6945
6946         if (try_charge(memcg, gfp_mask, nr_pages) == 0) {
6947                 mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);
6948                 return true;
6949         }
6950
6951         return false;
6952 }
6953
6954 /**
6955  * mem_cgroup_uncharge_skmem - uncharge socket memory
6956  * @memcg: memcg to uncharge
6957  * @nr_pages: number of pages to uncharge
6958  */
6959 void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
6960 {
6961         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
6962                 page_counter_uncharge(&memcg->tcpmem, nr_pages);
6963                 return;
6964         }
6965
6966         mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);
6967
6968         refill_stock(memcg, nr_pages);
6969 }
6970
6971 static int __init cgroup_memory(char *s)
6972 {
6973         char *token;
6974
6975         while ((token = strsep(&s, ",")) != NULL) {
6976                 if (!*token)
6977                         continue;
6978                 if (!strcmp(token, "nosocket"))
6979                         cgroup_memory_nosocket = true;
6980                 if (!strcmp(token, "nokmem"))
6981                         cgroup_memory_nokmem = true;
6982         }
6983         return 1;
6984 }
6985 __setup("cgroup.memory=", cgroup_memory);
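
/*
 * Example boot parameter usage (added note) for the comma-separated tokens
 * parsed above:
 *
 *	cgroup.memory=nosocket		# disable socket memory accounting
 *	cgroup.memory=nosocket,nokmem	# also disable kernel memory accounting
 */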
6986
6987 /*
6988  * subsys_initcall() for memory controller.
6989  *
6990  * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
6991  * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
6992  * basically everything that doesn't depend on a specific mem_cgroup structure
6993  * should be initialized from here.
6994  */
6995 static int __init mem_cgroup_init(void)
6996 {
6997         int cpu, node;
6998
6999         /*
7000          * Currently the s32 type (see struct batched_lruvec_stat) is
7001          * used for per-memcg-per-cpu caching of per-node statistics. For
7002          * this to work correctly, we must make sure that the overfill
7003          * threshold can't exceed S32_MAX / PAGE_SIZE.
7004          */
7005         BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
7006
7007         cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
7008                                   memcg_hotplug_cpu_dead);
7009
7010         for_each_possible_cpu(cpu)
7011                 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
7012                           drain_local_stock);
7013
7014         for_each_node(node) {
7015                 struct mem_cgroup_tree_per_node *rtpn;
7016
7017                 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
7018                                     node_online(node) ? node : NUMA_NO_NODE);
7019
7020                 rtpn->rb_root = RB_ROOT;
7021                 rtpn->rb_rightmost = NULL;
7022                 spin_lock_init(&rtpn->lock);
7023                 soft_limit_tree.rb_tree_per_node[node] = rtpn;
7024         }
7025
7026         return 0;
7027 }
7028 subsys_initcall(mem_cgroup_init);
7029
7030 #ifdef CONFIG_MEMCG_SWAP
7031 static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
7032 {
7033         while (!refcount_inc_not_zero(&memcg->id.ref)) {
7034                 /*
7035                  * The root cgroup cannot be destroyed, so its refcount must
7036                  * always be >= 1.
7037                  */
7038                 if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
7039                         VM_BUG_ON(1);
7040                         break;
7041                 }
7042                 memcg = parent_mem_cgroup(memcg);
7043                 if (!memcg)
7044                         memcg = root_mem_cgroup;
7045         }
7046         return memcg;
7047 }
7048
7049 /**
7050  * mem_cgroup_swapout - transfer a memsw charge to swap
7051  * @folio: folio whose memsw charge to transfer
7052  * @entry: swap entry to move the charge to
7053  *
7054  * Transfer the memsw charge of @folio to @entry.
7055  */
7056 void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
7057 {
7058         struct mem_cgroup *memcg, *swap_memcg;
7059         unsigned int nr_entries;
7060         unsigned short oldid;
7061
7062         VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
7063         VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
7064
7065         if (mem_cgroup_disabled())
7066                 return;
7067
7068         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7069                 return;
7070
7071         memcg = folio_memcg(folio);
7072
7073         VM_WARN_ON_ONCE_FOLIO(!memcg, folio);
7074         if (!memcg)
7075                 return;
7076
7077         /*
7078          * In case the memcg owning these pages has been offlined and doesn't
7079          * have an ID allocated to it anymore, charge the closest online
7080          * ancestor for the swap instead and transfer the memory+swap charge.
7081          */
7082         swap_memcg = mem_cgroup_id_get_online(memcg);
7083         nr_entries = folio_nr_pages(folio);
7084         /* Get references for the tail pages, too */
7085         if (nr_entries > 1)
7086                 mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
7087         oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
7088                                    nr_entries);
7089         VM_BUG_ON_FOLIO(oldid, folio);
7090         mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);
7091
7092         folio->memcg_data = 0;
7093
7094         if (!mem_cgroup_is_root(memcg))
7095                 page_counter_uncharge(&memcg->memory, nr_entries);
7096
7097         if (!cgroup_memory_noswap && memcg != swap_memcg) {
7098                 if (!mem_cgroup_is_root(swap_memcg))
7099                         page_counter_charge(&swap_memcg->memsw, nr_entries);
7100                 page_counter_uncharge(&memcg->memsw, nr_entries);
7101         }
7102
7103         /*
7104          * Interrupts should be disabled here because the caller holds the
7105          * i_pages lock, which is taken with interrupts off. Keeping
7106          * interrupts disabled is important because it is the
7107          * only synchronisation we have for updating the per-CPU variables.
7108          */
7109         memcg_stats_lock();
7110         mem_cgroup_charge_statistics(memcg, -nr_entries);
7111         memcg_stats_unlock();
7112         memcg_check_events(memcg, folio_nid(folio));
7113
7114         css_put(&memcg->css);
7115 }
7116
7117 /**
7118  * __mem_cgroup_try_charge_swap - try charging swap space for a page
7119  * @page: page being added to swap
7120  * @entry: swap entry to charge
7121  *
7122  * Try to charge @page's memcg for the swap space at @entry.
7123  *
7124  * Returns 0 on success, -ENOMEM on failure.
7125  */
7126 int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
7127 {
7128         unsigned int nr_pages = thp_nr_pages(page);
7129         struct page_counter *counter;
7130         struct mem_cgroup *memcg;
7131         unsigned short oldid;
7132
7133         if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
7134                 return 0;
7135
7136         memcg = page_memcg(page);
7137
7138         VM_WARN_ON_ONCE_PAGE(!memcg, page);
7139         if (!memcg)
7140                 return 0;
7141
7142         if (!entry.val) {
7143                 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7144                 return 0;
7145         }
7146
7147         memcg = mem_cgroup_id_get_online(memcg);
7148
7149         if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
7150             !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
7151                 memcg_memory_event(memcg, MEMCG_SWAP_MAX);
7152                 memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
7153                 mem_cgroup_id_put(memcg);
7154                 return -ENOMEM;
7155         }
7156
7157         /* Get references for the tail pages, too */
7158         if (nr_pages > 1)
7159                 mem_cgroup_id_get_many(memcg, nr_pages - 1);
7160         oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
7161         VM_BUG_ON_PAGE(oldid, page);
7162         mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);
7163
7164         return 0;
7165 }
7166
7167 /**
7168  * __mem_cgroup_uncharge_swap - uncharge swap space
7169  * @entry: swap entry to uncharge
7170  * @nr_pages: the amount of swap space to uncharge
7171  */
7172 void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
7173 {
7174         struct mem_cgroup *memcg;
7175         unsigned short id;
7176
7177         id = swap_cgroup_record(entry, 0, nr_pages);
7178         rcu_read_lock();
7179         memcg = mem_cgroup_from_id(id);
7180         if (memcg) {
7181                 if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
7182                         if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
7183                                 page_counter_uncharge(&memcg->swap, nr_pages);
7184                         else
7185                                 page_counter_uncharge(&memcg->memsw, nr_pages);
7186                 }
7187                 mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
7188                 mem_cgroup_id_put_many(memcg, nr_pages);
7189         }
7190         rcu_read_unlock();
7191 }
7192
7193 long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
7194 {
7195         long nr_swap_pages = get_nr_swap_pages();
7196
7197         if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7198                 return nr_swap_pages;
7199         for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
7200                 nr_swap_pages = min_t(long, nr_swap_pages,
7201                                       READ_ONCE(memcg->swap.max) -
7202                                       page_counter_read(&memcg->swap));
7203         return nr_swap_pages;
7204 }
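
/*
 * Worked example (added, hypothetical numbers): if the system has 1000 free
 * swap pages and an ancestor has swap.max = 300 with 100 pages already
 * charged, the walk above clamps the result to min(1000, 300 - 100) = 200
 * pages.
 */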
7205
7206 bool mem_cgroup_swap_full(struct page *page)
7207 {
7208         struct mem_cgroup *memcg;
7209
7210         VM_BUG_ON_PAGE(!PageLocked(page), page);
7211
7212         if (vm_swap_full())
7213                 return true;
7214         if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
7215                 return false;
7216
7217         memcg = page_memcg(page);
7218         if (!memcg)
7219                 return false;
7220
7221         for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
7222                 unsigned long usage = page_counter_read(&memcg->swap);
7223
7224                 if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
7225                     usage * 2 >= READ_ONCE(memcg->swap.max))
7226                         return true;
7227         }
7228
7229         return false;
7230 }
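
/*
 * Illustrative note (added): the "usage * 2 >=" tests above mean a memcg is
 * considered swap-full once usage crosses half of swap.high or swap.max -
 * e.g. with swap.max = 200MB, 100MB of used swap already reports full, so
 * callers can start releasing duplicate swap entries earlier.
 */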
7231
7232 static int __init setup_swap_account(char *s)
7233 {
7234         if (!strcmp(s, "1"))
7235                 cgroup_memory_noswap = false;
7236         else if (!strcmp(s, "0"))
7237                 cgroup_memory_noswap = true;
7238         return 1;
7239 }
7240 __setup("swapaccount=", setup_swap_account);
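
/*
 * Example boot parameter usage (added note): "swapaccount=0" disables and
 * "swapaccount=1" enables the swap controller, matching the two strcmp()
 * cases above:
 *
 *	swapaccount=0
 */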
7241
7242 static u64 swap_current_read(struct cgroup_subsys_state *css,
7243                              struct cftype *cft)
7244 {
7245         struct mem_cgroup *memcg = mem_cgroup_from_css(css);
7246
7247         return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
7248 }
7249
7250 static int swap_high_show(struct seq_file *m, void *v)
7251 {
7252         return seq_puts_memcg_tunable(m,
7253                 READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
7254 }
7255
7256 static ssize_t swap_high_write(struct kernfs_open_file *of,
7257                                char *buf, size_t nbytes, loff_t off)
7258 {
7259         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7260         unsigned long high;
7261         int err;
7262
7263         buf = strstrip(buf);
7264         err = page_counter_memparse(buf, "max", &high);
7265         if (err)
7266                 return err;
7267
7268         page_counter_set_high(&memcg->swap, high);
7269
7270         return nbytes;
7271 }
7272
7273 static int swap_max_show(struct seq_file *m, void *v)
7274 {
7275         return seq_puts_memcg_tunable(m,
7276                 READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
7277 }
7278
7279 static ssize_t swap_max_write(struct kernfs_open_file *of,
7280                               char *buf, size_t nbytes, loff_t off)
7281 {
7282         struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
7283         unsigned long max;
7284         int err;
7285
7286         buf = strstrip(buf);
7287         err = page_counter_memparse(buf, "max", &max);
7288         if (err)
7289                 return err;
7290
7291         xchg(&memcg->swap.max, max);
7292
7293         return nbytes;
7294 }
7295
7296 static int swap_events_show(struct seq_file *m, void *v)
7297 {
7298         struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
7299
7300         seq_printf(m, "high %lu\n",
7301                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
7302         seq_printf(m, "max %lu\n",
7303                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
7304         seq_printf(m, "fail %lu\n",
7305                    atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));
7306
7307         return 0;
7308 }
7309
7310 static struct cftype swap_files[] = {
7311         {
7312                 .name = "swap.current",
7313                 .flags = CFTYPE_NOT_ON_ROOT,
7314                 .read_u64 = swap_current_read,
7315         },
7316         {
7317                 .name = "swap.high",
7318                 .flags = CFTYPE_NOT_ON_ROOT,
7319                 .seq_show = swap_high_show,
7320                 .write = swap_high_write,
7321         },
7322         {
7323                 .name = "swap.max",
7324                 .flags = CFTYPE_NOT_ON_ROOT,
7325                 .seq_show = swap_max_show,
7326                 .write = swap_max_write,
7327         },
7328         {
7329                 .name = "swap.events",
7330                 .flags = CFTYPE_NOT_ON_ROOT,
7331                 .file_offset = offsetof(struct mem_cgroup, swap_events_file),
7332                 .seq_show = swap_events_show,
7333         },
7334         { }     /* terminate */
7335 };
7336
7337 static struct cftype memsw_files[] = {
7338         {
7339                 .name = "memsw.usage_in_bytes",
7340                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
7341                 .read_u64 = mem_cgroup_read_u64,
7342         },
7343         {
7344                 .name = "memsw.max_usage_in_bytes",
7345                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
7346                 .write = mem_cgroup_reset,
7347                 .read_u64 = mem_cgroup_read_u64,
7348         },
7349         {
7350                 .name = "memsw.limit_in_bytes",
7351                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
7352                 .write = mem_cgroup_write,
7353                 .read_u64 = mem_cgroup_read_u64,
7354         },
7355         {
7356                 .name = "memsw.failcnt",
7357                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
7358                 .write = mem_cgroup_reset,
7359                 .read_u64 = mem_cgroup_read_u64,
7360         },
7361         { },    /* terminate */
7362 };
7363
7364 /*
7365  * If mem_cgroup_swap_init() is implemented as a subsys_initcall()
7366  * instead of a core_initcall(), this could mean cgroup_memory_noswap still
7367  * remains set to false even when memcg is disabled via the
7368  * "cgroup_disable=memory" boot parameter. This may result in a premature
7369  * OOPS inside the mem_cgroup_get_nr_swap_pages() function in corner cases.
7370  */
7371 static int __init mem_cgroup_swap_init(void)
7372 {
7373         /* No memory control -> no swap control */
7374         if (mem_cgroup_disabled())
7375                 cgroup_memory_noswap = true;
7376
7377         if (cgroup_memory_noswap)
7378                 return 0;
7379
7380         WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
7381         WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));
7382
7383         return 0;
7384 }
7385 core_initcall(mem_cgroup_swap_init);
7386
7387 #endif /* CONFIG_MEMCG_SWAP */