1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* memcontrol.c - Memory Controller
4 * Copyright IBM Corporation, 2007
5 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
7 * Copyright 2007 OpenVZ SWsoft Inc
8 * Author: Pavel Emelianov <xemul@openvz.org>
11 * Copyright (C) 2009 Nokia Corporation
12 * Author: Kirill A. Shutemov
14 * Kernel Memory Controller
15 * Copyright (C) 2012 Parallels Inc. and Google Inc.
16 * Authors: Glauber Costa and Suleiman Souhlal
19 * Charge lifetime sanitation
20 * Lockless page tracking & accounting
21 * Unified hierarchy configuration model
22 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
24 * Per memcg lru locking
25 * Copyright (C) 2020 Alibaba, Inc, Alex Shi
28 #include <linux/page_counter.h>
29 #include <linux/memcontrol.h>
30 #include <linux/cgroup.h>
31 #include <linux/pagewalk.h>
32 #include <linux/sched/mm.h>
33 #include <linux/shmem_fs.h>
34 #include <linux/hugetlb.h>
35 #include <linux/pagemap.h>
36 #include <linux/vm_event_item.h>
37 #include <linux/smp.h>
38 #include <linux/page-flags.h>
39 #include <linux/backing-dev.h>
40 #include <linux/bit_spinlock.h>
41 #include <linux/rcupdate.h>
42 #include <linux/limits.h>
43 #include <linux/export.h>
44 #include <linux/mutex.h>
45 #include <linux/rbtree.h>
46 #include <linux/slab.h>
47 #include <linux/swap.h>
48 #include <linux/swapops.h>
49 #include <linux/spinlock.h>
50 #include <linux/eventfd.h>
51 #include <linux/poll.h>
52 #include <linux/sort.h>
54 #include <linux/seq_file.h>
55 #include <linux/vmpressure.h>
56 #include <linux/mm_inline.h>
57 #include <linux/swap_cgroup.h>
58 #include <linux/cpu.h>
59 #include <linux/oom.h>
60 #include <linux/lockdep.h>
61 #include <linux/file.h>
62 #include <linux/tracehook.h>
63 #include <linux/psi.h>
64 #include <linux/seq_buf.h>
70 #include <linux/uaccess.h>
72 #include <trace/events/vmscan.h>
74 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
75 EXPORT_SYMBOL(memory_cgrp_subsys);
77 struct mem_cgroup *root_mem_cgroup __read_mostly;
79 /* Active memory cgroup to use from an interrupt context */
80 DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
81 EXPORT_PER_CPU_SYMBOL_GPL(int_active_memcg);
83 /* Socket memory accounting disabled? */
84 static bool cgroup_memory_nosocket;
86 /* Kernel memory accounting disabled? */
87 bool cgroup_memory_nokmem;
89 /* Whether the swap controller is active */
90 #ifdef CONFIG_MEMCG_SWAP
91 bool cgroup_memory_noswap __read_mostly;
93 #define cgroup_memory_noswap 1
96 #ifdef CONFIG_CGROUP_WRITEBACK
97 static DECLARE_WAIT_QUEUE_HEAD(memcg_cgwb_frn_waitq);
100 /* Whether legacy memory+swap accounting is active */
101 static bool do_memsw_account(void)
103 return !cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_noswap;
106 #define THRESHOLDS_EVENTS_TARGET 128
107 #define SOFTLIMIT_EVENTS_TARGET 1024
110 * Cgroups above their limits are maintained in a RB-Tree, independent of
111 * their hierarchy representation
114 struct mem_cgroup_tree_per_node {
115 struct rb_root rb_root;
116 struct rb_node *rb_rightmost;
120 struct mem_cgroup_tree {
121 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
124 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
127 struct mem_cgroup_eventfd_list {
128 struct list_head list;
129 struct eventfd_ctx *eventfd;
133 * cgroup_event represents events which userspace want to receive.
135 struct mem_cgroup_event {
137 * memcg which the event belongs to.
139 struct mem_cgroup *memcg;
141 * eventfd to signal userspace about the event.
143 struct eventfd_ctx *eventfd;
145 * Each of these stored in a list by the cgroup.
147 struct list_head list;
149 * register_event() callback will be used to add new userspace
150 * waiter for changes related to this event. Use eventfd_signal()
151 * on eventfd to send notification to userspace.
153 int (*register_event)(struct mem_cgroup *memcg,
154 struct eventfd_ctx *eventfd, const char *args);
156 * unregister_event() callback will be called when userspace closes
157 * the eventfd or on cgroup removing. This callback must be set,
158 * if you want provide notification functionality.
160 void (*unregister_event)(struct mem_cgroup *memcg,
161 struct eventfd_ctx *eventfd);
163 * All fields below needed to unregister event when
164 * userspace closes eventfd.
167 wait_queue_head_t *wqh;
168 wait_queue_entry_t wait;
169 struct work_struct remove;
172 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
173 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
175 /* Stuffs for move charges at task migration. */
177 * Types of charges to be moved.
179 #define MOVE_ANON 0x1U
180 #define MOVE_FILE 0x2U
181 #define MOVE_MASK (MOVE_ANON | MOVE_FILE)
183 /* "mc" and its members are protected by cgroup_mutex */
184 static struct move_charge_struct {
185 spinlock_t lock; /* for from, to */
186 struct mm_struct *mm;
187 struct mem_cgroup *from;
188 struct mem_cgroup *to;
190 unsigned long precharge;
191 unsigned long moved_charge;
192 unsigned long moved_swap;
193 struct task_struct *moving_task; /* a task moving charges */
194 wait_queue_head_t waitq; /* a waitq for other context */
196 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
197 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
201 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
202 * limit reclaim to prevent infinite loops, if they ever occur.
204 #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
205 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
207 /* for encoding cft->private value on file */
216 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
217 #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
218 #define MEMFILE_ATTR(val) ((val) & 0xffff)
219 /* Used for OOM notifier */
220 #define OOM_CONTROL (0)
223 * Iteration constructs for visiting all cgroups (under a tree). If
224 * loops are exited prematurely (break), mem_cgroup_iter_break() must
225 * be used for reference counting.
227 #define for_each_mem_cgroup_tree(iter, root) \
228 for (iter = mem_cgroup_iter(root, NULL, NULL); \
230 iter = mem_cgroup_iter(root, iter, NULL))
232 #define for_each_mem_cgroup(iter) \
233 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
235 iter = mem_cgroup_iter(NULL, iter, NULL))
237 static inline bool should_force_charge(void)
239 return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
240 (current->flags & PF_EXITING);
243 /* Some nice accessors for the vmpressure. */
244 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
247 memcg = root_mem_cgroup;
248 return &memcg->vmpressure;
251 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
253 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
256 #ifdef CONFIG_MEMCG_KMEM
257 extern spinlock_t css_set_lock;
259 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
260 unsigned int nr_pages);
262 static void obj_cgroup_release(struct percpu_ref *ref)
264 struct obj_cgroup *objcg = container_of(ref, struct obj_cgroup, refcnt);
265 unsigned int nr_bytes;
266 unsigned int nr_pages;
270 * At this point all allocated objects are freed, and
271 * objcg->nr_charged_bytes can't have an arbitrary byte value.
272 * However, it can be PAGE_SIZE or (x * PAGE_SIZE).
274 * The following sequence can lead to it:
275 * 1) CPU0: objcg == stock->cached_objcg
276 * 2) CPU1: we do a small allocation (e.g. 92 bytes),
277 * PAGE_SIZE bytes are charged
278 * 3) CPU1: a process from another memcg is allocating something,
279 * the stock if flushed,
280 * objcg->nr_charged_bytes = PAGE_SIZE - 92
281 * 5) CPU0: we do release this object,
282 * 92 bytes are added to stock->nr_bytes
283 * 6) CPU0: stock is flushed,
284 * 92 bytes are added to objcg->nr_charged_bytes
286 * In the result, nr_charged_bytes == PAGE_SIZE.
287 * This page will be uncharged in obj_cgroup_release().
289 nr_bytes = atomic_read(&objcg->nr_charged_bytes);
290 WARN_ON_ONCE(nr_bytes & (PAGE_SIZE - 1));
291 nr_pages = nr_bytes >> PAGE_SHIFT;
294 obj_cgroup_uncharge_pages(objcg, nr_pages);
296 spin_lock_irqsave(&css_set_lock, flags);
297 list_del(&objcg->list);
298 spin_unlock_irqrestore(&css_set_lock, flags);
300 percpu_ref_exit(ref);
301 kfree_rcu(objcg, rcu);
304 static struct obj_cgroup *obj_cgroup_alloc(void)
306 struct obj_cgroup *objcg;
309 objcg = kzalloc(sizeof(struct obj_cgroup), GFP_KERNEL);
313 ret = percpu_ref_init(&objcg->refcnt, obj_cgroup_release, 0,
319 INIT_LIST_HEAD(&objcg->list);
323 static void memcg_reparent_objcgs(struct mem_cgroup *memcg,
324 struct mem_cgroup *parent)
326 struct obj_cgroup *objcg, *iter;
328 objcg = rcu_replace_pointer(memcg->objcg, NULL, true);
330 spin_lock_irq(&css_set_lock);
332 /* 1) Ready to reparent active objcg. */
333 list_add(&objcg->list, &memcg->objcg_list);
334 /* 2) Reparent active objcg and already reparented objcgs to parent. */
335 list_for_each_entry(iter, &memcg->objcg_list, list)
336 WRITE_ONCE(iter->memcg, parent);
337 /* 3) Move already reparented objcgs to the parent's list */
338 list_splice(&memcg->objcg_list, &parent->objcg_list);
340 spin_unlock_irq(&css_set_lock);
342 percpu_ref_kill(&objcg->refcnt);
346 * This will be used as a shrinker list's index.
347 * The main reason for not using cgroup id for this:
348 * this works better in sparse environments, where we have a lot of memcgs,
349 * but only a few kmem-limited. Or also, if we have, for instance, 200
350 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
351 * 200 entry array for that.
353 * The current size of the caches array is stored in memcg_nr_cache_ids. It
354 * will double each time we have to increase it.
356 static DEFINE_IDA(memcg_cache_ida);
357 int memcg_nr_cache_ids;
359 /* Protects memcg_nr_cache_ids */
360 static DECLARE_RWSEM(memcg_cache_ids_sem);
362 void memcg_get_cache_ids(void)
364 down_read(&memcg_cache_ids_sem);
367 void memcg_put_cache_ids(void)
369 up_read(&memcg_cache_ids_sem);
373 * MIN_SIZE is different than 1, because we would like to avoid going through
374 * the alloc/free process all the time. In a small machine, 4 kmem-limited
375 * cgroups is a reasonable guess. In the future, it could be a parameter or
376 * tunable, but that is strictly not necessary.
378 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
379 * this constant directly from cgroup, but it is understandable that this is
380 * better kept as an internal representation in cgroup.c. In any case, the
381 * cgrp_id space is not getting any smaller, and we don't have to necessarily
382 * increase ours as well if it increases.
384 #define MEMCG_CACHES_MIN_SIZE 4
385 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
388 * A lot of the calls to the cache allocation functions are expected to be
389 * inlined by the compiler. Since the calls to memcg_slab_pre_alloc_hook() are
390 * conditional to this static branch, we'll have to allow modules that does
391 * kmem_cache_alloc and the such to see this symbol as well
393 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
394 EXPORT_SYMBOL(memcg_kmem_enabled_key);
398 * mem_cgroup_css_from_page - css of the memcg associated with a page
399 * @page: page of interest
401 * If memcg is bound to the default hierarchy, css of the memcg associated
402 * with @page is returned. The returned css remains associated with @page
403 * until it is released.
405 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
408 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
410 struct mem_cgroup *memcg;
412 memcg = page_memcg(page);
414 if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
415 memcg = root_mem_cgroup;
421 * page_cgroup_ino - return inode number of the memcg a page is charged to
424 * Look up the closest online ancestor of the memory cgroup @page is charged to
425 * and return its inode number or 0 if @page is not charged to any cgroup. It
426 * is safe to call this function without holding a reference to @page.
428 * Note, this function is inherently racy, because there is nothing to prevent
429 * the cgroup inode from getting torn down and potentially reallocated a moment
430 * after page_cgroup_ino() returns, so it only should be used by callers that
431 * do not care (such as procfs interfaces).
433 ino_t page_cgroup_ino(struct page *page)
435 struct mem_cgroup *memcg;
436 unsigned long ino = 0;
439 memcg = page_memcg_check(page);
441 while (memcg && !(memcg->css.flags & CSS_ONLINE))
442 memcg = parent_mem_cgroup(memcg);
444 ino = cgroup_ino(memcg->css.cgroup);
449 static struct mem_cgroup_per_node *
450 mem_cgroup_page_nodeinfo(struct mem_cgroup *memcg, struct page *page)
452 int nid = page_to_nid(page);
454 return memcg->nodeinfo[nid];
457 static struct mem_cgroup_tree_per_node *
458 soft_limit_tree_node(int nid)
460 return soft_limit_tree.rb_tree_per_node[nid];
463 static struct mem_cgroup_tree_per_node *
464 soft_limit_tree_from_page(struct page *page)
466 int nid = page_to_nid(page);
468 return soft_limit_tree.rb_tree_per_node[nid];
471 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_node *mz,
472 struct mem_cgroup_tree_per_node *mctz,
473 unsigned long new_usage_in_excess)
475 struct rb_node **p = &mctz->rb_root.rb_node;
476 struct rb_node *parent = NULL;
477 struct mem_cgroup_per_node *mz_node;
478 bool rightmost = true;
483 mz->usage_in_excess = new_usage_in_excess;
484 if (!mz->usage_in_excess)
488 mz_node = rb_entry(parent, struct mem_cgroup_per_node,
490 if (mz->usage_in_excess < mz_node->usage_in_excess) {
499 mctz->rb_rightmost = &mz->tree_node;
501 rb_link_node(&mz->tree_node, parent, p);
502 rb_insert_color(&mz->tree_node, &mctz->rb_root);
506 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
507 struct mem_cgroup_tree_per_node *mctz)
512 if (&mz->tree_node == mctz->rb_rightmost)
513 mctz->rb_rightmost = rb_prev(&mz->tree_node);
515 rb_erase(&mz->tree_node, &mctz->rb_root);
519 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_node *mz,
520 struct mem_cgroup_tree_per_node *mctz)
524 spin_lock_irqsave(&mctz->lock, flags);
525 __mem_cgroup_remove_exceeded(mz, mctz);
526 spin_unlock_irqrestore(&mctz->lock, flags);
529 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
531 unsigned long nr_pages = page_counter_read(&memcg->memory);
532 unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
533 unsigned long excess = 0;
535 if (nr_pages > soft_limit)
536 excess = nr_pages - soft_limit;
541 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
543 unsigned long excess;
544 struct mem_cgroup_per_node *mz;
545 struct mem_cgroup_tree_per_node *mctz;
547 mctz = soft_limit_tree_from_page(page);
551 * Necessary to update all ancestors when hierarchy is used.
552 * because their event counter is not touched.
554 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
555 mz = mem_cgroup_page_nodeinfo(memcg, page);
556 excess = soft_limit_excess(memcg);
558 * We have to update the tree if mz is on RB-tree or
559 * mem is over its softlimit.
561 if (excess || mz->on_tree) {
564 spin_lock_irqsave(&mctz->lock, flags);
565 /* if on-tree, remove it */
567 __mem_cgroup_remove_exceeded(mz, mctz);
569 * Insert again. mz->usage_in_excess will be updated.
570 * If excess is 0, no tree ops.
572 __mem_cgroup_insert_exceeded(mz, mctz, excess);
573 spin_unlock_irqrestore(&mctz->lock, flags);
578 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
580 struct mem_cgroup_tree_per_node *mctz;
581 struct mem_cgroup_per_node *mz;
585 mz = memcg->nodeinfo[nid];
586 mctz = soft_limit_tree_node(nid);
588 mem_cgroup_remove_exceeded(mz, mctz);
592 static struct mem_cgroup_per_node *
593 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
595 struct mem_cgroup_per_node *mz;
599 if (!mctz->rb_rightmost)
600 goto done; /* Nothing to reclaim from */
602 mz = rb_entry(mctz->rb_rightmost,
603 struct mem_cgroup_per_node, tree_node);
605 * Remove the node now but someone else can add it back,
606 * we will to add it back at the end of reclaim to its correct
607 * position in the tree.
609 __mem_cgroup_remove_exceeded(mz, mctz);
610 if (!soft_limit_excess(mz->memcg) ||
611 !css_tryget(&mz->memcg->css))
617 static struct mem_cgroup_per_node *
618 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_node *mctz)
620 struct mem_cgroup_per_node *mz;
622 spin_lock_irq(&mctz->lock);
623 mz = __mem_cgroup_largest_soft_limit_node(mctz);
624 spin_unlock_irq(&mctz->lock);
629 * __mod_memcg_state - update cgroup memory statistics
630 * @memcg: the memory cgroup
631 * @idx: the stat item - can be enum memcg_stat_item or enum node_stat_item
632 * @val: delta to add to the counter, can be negative
634 void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val)
636 if (mem_cgroup_disabled())
639 __this_cpu_add(memcg->vmstats_percpu->state[idx], val);
640 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
643 /* idx can be of type enum memcg_stat_item or node_stat_item. */
644 static unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
646 long x = READ_ONCE(memcg->vmstats.state[idx]);
654 /* idx can be of type enum memcg_stat_item or node_stat_item. */
655 static unsigned long memcg_page_state_local(struct mem_cgroup *memcg, int idx)
660 for_each_possible_cpu(cpu)
661 x += per_cpu(memcg->vmstats_percpu->state[idx], cpu);
669 static struct mem_cgroup_per_node *
670 parent_nodeinfo(struct mem_cgroup_per_node *pn, int nid)
672 struct mem_cgroup *parent;
674 parent = parent_mem_cgroup(pn->memcg);
677 return parent->nodeinfo[nid];
680 void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
683 struct mem_cgroup_per_node *pn;
684 struct mem_cgroup *memcg;
685 long x, threshold = MEMCG_CHARGE_BATCH;
687 pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
691 __mod_memcg_state(memcg, idx, val);
694 __this_cpu_add(pn->lruvec_stat_local->count[idx], val);
696 if (vmstat_item_in_bytes(idx))
697 threshold <<= PAGE_SHIFT;
699 x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]);
700 if (unlikely(abs(x) > threshold)) {
701 pg_data_t *pgdat = lruvec_pgdat(lruvec);
702 struct mem_cgroup_per_node *pi;
704 for (pi = pn; pi; pi = parent_nodeinfo(pi, pgdat->node_id))
705 atomic_long_add(x, &pi->lruvec_stat[idx]);
708 __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x);
712 * __mod_lruvec_state - update lruvec memory statistics
713 * @lruvec: the lruvec
714 * @idx: the stat item
715 * @val: delta to add to the counter, can be negative
717 * The lruvec is the intersection of the NUMA node and a cgroup. This
718 * function updates the all three counters that are affected by a
719 * change of state at this level: per-node, per-cgroup, per-lruvec.
721 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
725 __mod_node_page_state(lruvec_pgdat(lruvec), idx, val);
727 /* Update memcg and lruvec */
728 if (!mem_cgroup_disabled())
729 __mod_memcg_lruvec_state(lruvec, idx, val);
732 void __mod_lruvec_page_state(struct page *page, enum node_stat_item idx,
735 struct page *head = compound_head(page); /* rmap on tail pages */
736 struct mem_cgroup *memcg;
737 pg_data_t *pgdat = page_pgdat(page);
738 struct lruvec *lruvec;
741 memcg = page_memcg(head);
742 /* Untracked pages have no memcg, no lruvec. Update only the node */
745 __mod_node_page_state(pgdat, idx, val);
749 lruvec = mem_cgroup_lruvec(memcg, pgdat);
750 __mod_lruvec_state(lruvec, idx, val);
753 EXPORT_SYMBOL(__mod_lruvec_page_state);
755 void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
757 pg_data_t *pgdat = page_pgdat(virt_to_page(p));
758 struct mem_cgroup *memcg;
759 struct lruvec *lruvec;
762 memcg = mem_cgroup_from_obj(p);
765 * Untracked pages have no memcg, no lruvec. Update only the
766 * node. If we reparent the slab objects to the root memcg,
767 * when we free the slab object, we need to update the per-memcg
768 * vmstats to keep it correct for the root memcg.
771 __mod_node_page_state(pgdat, idx, val);
773 lruvec = mem_cgroup_lruvec(memcg, pgdat);
774 __mod_lruvec_state(lruvec, idx, val);
780 * mod_objcg_mlstate() may be called with irq enabled, so
781 * mod_memcg_lruvec_state() should be used.
783 static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
784 struct pglist_data *pgdat,
785 enum node_stat_item idx, int nr)
787 struct mem_cgroup *memcg;
788 struct lruvec *lruvec;
791 memcg = obj_cgroup_memcg(objcg);
792 lruvec = mem_cgroup_lruvec(memcg, pgdat);
793 mod_memcg_lruvec_state(lruvec, idx, nr);
798 * __count_memcg_events - account VM events in a cgroup
799 * @memcg: the memory cgroup
800 * @idx: the event item
801 * @count: the number of events that occurred
803 void __count_memcg_events(struct mem_cgroup *memcg, enum vm_event_item idx,
806 if (mem_cgroup_disabled())
809 __this_cpu_add(memcg->vmstats_percpu->events[idx], count);
810 cgroup_rstat_updated(memcg->css.cgroup, smp_processor_id());
813 static unsigned long memcg_events(struct mem_cgroup *memcg, int event)
815 return READ_ONCE(memcg->vmstats.events[event]);
818 static unsigned long memcg_events_local(struct mem_cgroup *memcg, int event)
823 for_each_possible_cpu(cpu)
824 x += per_cpu(memcg->vmstats_percpu->events[event], cpu);
828 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
832 /* pagein of a big page is an event. So, ignore page size */
834 __count_memcg_events(memcg, PGPGIN, 1);
836 __count_memcg_events(memcg, PGPGOUT, 1);
837 nr_pages = -nr_pages; /* for event */
840 __this_cpu_add(memcg->vmstats_percpu->nr_page_events, nr_pages);
843 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
844 enum mem_cgroup_events_target target)
846 unsigned long val, next;
848 val = __this_cpu_read(memcg->vmstats_percpu->nr_page_events);
849 next = __this_cpu_read(memcg->vmstats_percpu->targets[target]);
850 /* from time_after() in jiffies.h */
851 if ((long)(next - val) < 0) {
853 case MEM_CGROUP_TARGET_THRESH:
854 next = val + THRESHOLDS_EVENTS_TARGET;
856 case MEM_CGROUP_TARGET_SOFTLIMIT:
857 next = val + SOFTLIMIT_EVENTS_TARGET;
862 __this_cpu_write(memcg->vmstats_percpu->targets[target], next);
869 * Check events in order.
872 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
874 /* threshold event is triggered in finer grain than soft limit */
875 if (unlikely(mem_cgroup_event_ratelimit(memcg,
876 MEM_CGROUP_TARGET_THRESH))) {
879 do_softlimit = mem_cgroup_event_ratelimit(memcg,
880 MEM_CGROUP_TARGET_SOFTLIMIT);
881 mem_cgroup_threshold(memcg);
882 if (unlikely(do_softlimit))
883 mem_cgroup_update_tree(memcg, page);
887 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
890 * mm_update_next_owner() may clear mm->owner to NULL
891 * if it races with swapoff, page migration, etc.
892 * So this can be called with p == NULL.
897 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
899 EXPORT_SYMBOL(mem_cgroup_from_task);
901 static __always_inline struct mem_cgroup *active_memcg(void)
904 return this_cpu_read(int_active_memcg);
906 return current->active_memcg;
910 * get_mem_cgroup_from_mm: Obtain a reference on given mm_struct's memcg.
911 * @mm: mm from which memcg should be extracted. It can be NULL.
913 * Obtain a reference on mm->memcg and returns it if successful. If mm
914 * is NULL, then the memcg is chosen as follows:
915 * 1) The active memcg, if set.
916 * 2) current->mm->memcg, if available
918 * If mem_cgroup is disabled, NULL is returned.
920 struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
922 struct mem_cgroup *memcg;
924 if (mem_cgroup_disabled())
928 * Page cache insertions can happen without an
929 * actual mm context, e.g. during disk probing
930 * on boot, loopback IO, acct() writes etc.
932 * No need to css_get on root memcg as the reference
933 * counting is disabled on the root level in the
934 * cgroup core. See CSS_NO_REF.
937 memcg = active_memcg();
938 if (unlikely(memcg)) {
939 /* remote memcg must hold a ref */
940 css_get(&memcg->css);
945 return root_mem_cgroup;
950 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
951 if (unlikely(!memcg))
952 memcg = root_mem_cgroup;
953 } while (!css_tryget(&memcg->css));
957 EXPORT_SYMBOL(get_mem_cgroup_from_mm);
959 static __always_inline bool memcg_kmem_bypass(void)
961 /* Allow remote memcg charging from any context. */
962 if (unlikely(active_memcg()))
965 /* Memcg to charge can't be determined. */
966 if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
973 * mem_cgroup_iter - iterate over memory cgroup hierarchy
974 * @root: hierarchy root
975 * @prev: previously returned memcg, NULL on first invocation
976 * @reclaim: cookie for shared reclaim walks, NULL for full walks
978 * Returns references to children of the hierarchy below @root, or
979 * @root itself, or %NULL after a full round-trip.
981 * Caller must pass the return value in @prev on subsequent
982 * invocations for reference counting, or use mem_cgroup_iter_break()
983 * to cancel a hierarchy walk before the round-trip is complete.
985 * Reclaimers can specify a node in @reclaim to divide up the memcgs
986 * in the hierarchy among all concurrent reclaimers operating on the
989 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
990 struct mem_cgroup *prev,
991 struct mem_cgroup_reclaim_cookie *reclaim)
993 struct mem_cgroup_reclaim_iter *iter;
994 struct cgroup_subsys_state *css = NULL;
995 struct mem_cgroup *memcg = NULL;
996 struct mem_cgroup *pos = NULL;
998 if (mem_cgroup_disabled())
1002 root = root_mem_cgroup;
1004 if (prev && !reclaim)
1010 struct mem_cgroup_per_node *mz;
1012 mz = root->nodeinfo[reclaim->pgdat->node_id];
1015 if (prev && reclaim->generation != iter->generation)
1019 pos = READ_ONCE(iter->position);
1020 if (!pos || css_tryget(&pos->css))
1023 * css reference reached zero, so iter->position will
1024 * be cleared by ->css_released. However, we should not
1025 * rely on this happening soon, because ->css_released
1026 * is called from a work queue, and by busy-waiting we
1027 * might block it. So we clear iter->position right
1030 (void)cmpxchg(&iter->position, pos, NULL);
1038 css = css_next_descendant_pre(css, &root->css);
1041 * Reclaimers share the hierarchy walk, and a
1042 * new one might jump in right at the end of
1043 * the hierarchy - make sure they see at least
1044 * one group and restart from the beginning.
1052 * Verify the css and acquire a reference. The root
1053 * is provided by the caller, so we know it's alive
1054 * and kicking, and don't take an extra reference.
1056 memcg = mem_cgroup_from_css(css);
1058 if (css == &root->css)
1061 if (css_tryget(css))
1069 * The position could have already been updated by a competing
1070 * thread, so check that the value hasn't changed since we read
1071 * it to avoid reclaiming from the same cgroup twice.
1073 (void)cmpxchg(&iter->position, pos, memcg);
1081 reclaim->generation = iter->generation;
1086 if (prev && prev != root)
1087 css_put(&prev->css);
1093 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
1094 * @root: hierarchy root
1095 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
1097 void mem_cgroup_iter_break(struct mem_cgroup *root,
1098 struct mem_cgroup *prev)
1101 root = root_mem_cgroup;
1102 if (prev && prev != root)
1103 css_put(&prev->css);
1106 static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
1107 struct mem_cgroup *dead_memcg)
1109 struct mem_cgroup_reclaim_iter *iter;
1110 struct mem_cgroup_per_node *mz;
1113 for_each_node(nid) {
1114 mz = from->nodeinfo[nid];
1116 cmpxchg(&iter->position, dead_memcg, NULL);
1120 static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
1122 struct mem_cgroup *memcg = dead_memcg;
1123 struct mem_cgroup *last;
1126 __invalidate_reclaim_iterators(memcg, dead_memcg);
1128 } while ((memcg = parent_mem_cgroup(memcg)));
1131 * When cgruop1 non-hierarchy mode is used,
1132 * parent_mem_cgroup() does not walk all the way up to the
1133 * cgroup root (root_mem_cgroup). So we have to handle
1134 * dead_memcg from cgroup root separately.
1136 if (last != root_mem_cgroup)
1137 __invalidate_reclaim_iterators(root_mem_cgroup,
1142 * mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
1143 * @memcg: hierarchy root
1144 * @fn: function to call for each task
1145 * @arg: argument passed to @fn
1147 * This function iterates over tasks attached to @memcg or to any of its
1148 * descendants and calls @fn for each task. If @fn returns a non-zero
1149 * value, the function breaks the iteration loop and returns the value.
1150 * Otherwise, it will iterate over all tasks and return 0.
1152 * This function must not be called for the root memory cgroup.
1154 int mem_cgroup_scan_tasks(struct mem_cgroup *memcg,
1155 int (*fn)(struct task_struct *, void *), void *arg)
1157 struct mem_cgroup *iter;
1160 BUG_ON(memcg == root_mem_cgroup);
1162 for_each_mem_cgroup_tree(iter, memcg) {
1163 struct css_task_iter it;
1164 struct task_struct *task;
1166 css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it);
1167 while (!ret && (task = css_task_iter_next(&it)))
1168 ret = fn(task, arg);
1169 css_task_iter_end(&it);
1171 mem_cgroup_iter_break(memcg, iter);
1178 #ifdef CONFIG_DEBUG_VM
1179 void lruvec_memcg_debug(struct lruvec *lruvec, struct page *page)
1181 struct mem_cgroup *memcg;
1183 if (mem_cgroup_disabled())
1186 memcg = page_memcg(page);
1189 VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != root_mem_cgroup, page);
1191 VM_BUG_ON_PAGE(lruvec_memcg(lruvec) != memcg, page);
1196 * lock_page_lruvec - lock and return lruvec for a given page.
1199 * These functions are safe to use under any of the following conditions:
1202 * - lock_page_memcg()
1203 * - page->_refcount is zero
1205 struct lruvec *lock_page_lruvec(struct page *page)
1207 struct lruvec *lruvec;
1209 lruvec = mem_cgroup_page_lruvec(page);
1210 spin_lock(&lruvec->lru_lock);
1212 lruvec_memcg_debug(lruvec, page);
1217 struct lruvec *lock_page_lruvec_irq(struct page *page)
1219 struct lruvec *lruvec;
1221 lruvec = mem_cgroup_page_lruvec(page);
1222 spin_lock_irq(&lruvec->lru_lock);
1224 lruvec_memcg_debug(lruvec, page);
1229 struct lruvec *lock_page_lruvec_irqsave(struct page *page, unsigned long *flags)
1231 struct lruvec *lruvec;
1233 lruvec = mem_cgroup_page_lruvec(page);
1234 spin_lock_irqsave(&lruvec->lru_lock, *flags);
1236 lruvec_memcg_debug(lruvec, page);
1242 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1243 * @lruvec: mem_cgroup per zone lru vector
1244 * @lru: index of lru list the page is sitting on
1245 * @zid: zone id of the accounted pages
1246 * @nr_pages: positive when adding or negative when removing
1248 * This function must be called under lru_lock, just before a page is added
1249 * to or just after a page is removed from an lru list (that ordering being
1250 * so as to allow it to check that lru_size 0 is consistent with list_empty).
1252 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1253 int zid, int nr_pages)
1255 struct mem_cgroup_per_node *mz;
1256 unsigned long *lru_size;
1259 if (mem_cgroup_disabled())
1262 mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
1263 lru_size = &mz->lru_zone_size[zid][lru];
1266 *lru_size += nr_pages;
1269 if (WARN_ONCE(size < 0,
1270 "%s(%p, %d, %d): lru_size %ld\n",
1271 __func__, lruvec, lru, nr_pages, size)) {
1277 *lru_size += nr_pages;
1281 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1282 * @memcg: the memory cgroup
1284 * Returns the maximum amount of memory @mem can be charged with, in
1287 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1289 unsigned long margin = 0;
1290 unsigned long count;
1291 unsigned long limit;
1293 count = page_counter_read(&memcg->memory);
1294 limit = READ_ONCE(memcg->memory.max);
1296 margin = limit - count;
1298 if (do_memsw_account()) {
1299 count = page_counter_read(&memcg->memsw);
1300 limit = READ_ONCE(memcg->memsw.max);
1302 margin = min(margin, limit - count);
1311 * A routine for checking "mem" is under move_account() or not.
1313 * Checking a cgroup is mc.from or mc.to or under hierarchy of
1314 * moving cgroups. This is for waiting at high-memory pressure
1317 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1319 struct mem_cgroup *from;
1320 struct mem_cgroup *to;
1323 * Unlike task_move routines, we access mc.to, mc.from not under
1324 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1326 spin_lock(&mc.lock);
1332 ret = mem_cgroup_is_descendant(from, memcg) ||
1333 mem_cgroup_is_descendant(to, memcg);
1335 spin_unlock(&mc.lock);
1339 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1341 if (mc.moving_task && current != mc.moving_task) {
1342 if (mem_cgroup_under_move(memcg)) {
1344 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1345 /* moving charge context might have finished. */
1348 finish_wait(&mc.waitq, &wait);
1355 struct memory_stat {
1360 static const struct memory_stat memory_stats[] = {
1361 { "anon", NR_ANON_MAPPED },
1362 { "file", NR_FILE_PAGES },
1363 { "kernel_stack", NR_KERNEL_STACK_KB },
1364 { "pagetables", NR_PAGETABLE },
1365 { "percpu", MEMCG_PERCPU_B },
1366 { "sock", MEMCG_SOCK },
1367 { "shmem", NR_SHMEM },
1368 { "file_mapped", NR_FILE_MAPPED },
1369 { "file_dirty", NR_FILE_DIRTY },
1370 { "file_writeback", NR_WRITEBACK },
1372 { "swapcached", NR_SWAPCACHE },
1374 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1375 { "anon_thp", NR_ANON_THPS },
1376 { "file_thp", NR_FILE_THPS },
1377 { "shmem_thp", NR_SHMEM_THPS },
1379 { "inactive_anon", NR_INACTIVE_ANON },
1380 { "active_anon", NR_ACTIVE_ANON },
1381 { "inactive_file", NR_INACTIVE_FILE },
1382 { "active_file", NR_ACTIVE_FILE },
1383 { "unevictable", NR_UNEVICTABLE },
1384 { "slab_reclaimable", NR_SLAB_RECLAIMABLE_B },
1385 { "slab_unreclaimable", NR_SLAB_UNRECLAIMABLE_B },
1387 /* The memory events */
1388 { "workingset_refault_anon", WORKINGSET_REFAULT_ANON },
1389 { "workingset_refault_file", WORKINGSET_REFAULT_FILE },
1390 { "workingset_activate_anon", WORKINGSET_ACTIVATE_ANON },
1391 { "workingset_activate_file", WORKINGSET_ACTIVATE_FILE },
1392 { "workingset_restore_anon", WORKINGSET_RESTORE_ANON },
1393 { "workingset_restore_file", WORKINGSET_RESTORE_FILE },
1394 { "workingset_nodereclaim", WORKINGSET_NODERECLAIM },
1397 /* Translate stat items to the correct unit for memory.stat output */
1398 static int memcg_page_state_unit(int item)
1401 case MEMCG_PERCPU_B:
1402 case NR_SLAB_RECLAIMABLE_B:
1403 case NR_SLAB_UNRECLAIMABLE_B:
1404 case WORKINGSET_REFAULT_ANON:
1405 case WORKINGSET_REFAULT_FILE:
1406 case WORKINGSET_ACTIVATE_ANON:
1407 case WORKINGSET_ACTIVATE_FILE:
1408 case WORKINGSET_RESTORE_ANON:
1409 case WORKINGSET_RESTORE_FILE:
1410 case WORKINGSET_NODERECLAIM:
1412 case NR_KERNEL_STACK_KB:
1419 static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg,
1422 return memcg_page_state(memcg, item) * memcg_page_state_unit(item);
1425 static char *memory_stat_format(struct mem_cgroup *memcg)
1430 seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
1435 * Provide statistics on the state of the memory subsystem as
1436 * well as cumulative event counters that show past behavior.
1438 * This list is ordered following a combination of these gradients:
1439 * 1) generic big picture -> specifics and details
1440 * 2) reflecting userspace activity -> reflecting kernel heuristics
1442 * Current memory state:
1444 cgroup_rstat_flush(memcg->css.cgroup);
1446 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
1449 size = memcg_page_state_output(memcg, memory_stats[i].idx);
1450 seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size);
1452 if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) {
1453 size += memcg_page_state_output(memcg,
1454 NR_SLAB_RECLAIMABLE_B);
1455 seq_buf_printf(&s, "slab %llu\n", size);
1459 /* Accumulated memory events */
1461 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGFAULT),
1462 memcg_events(memcg, PGFAULT));
1463 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGMAJFAULT),
1464 memcg_events(memcg, PGMAJFAULT));
1465 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGREFILL),
1466 memcg_events(memcg, PGREFILL));
1467 seq_buf_printf(&s, "pgscan %lu\n",
1468 memcg_events(memcg, PGSCAN_KSWAPD) +
1469 memcg_events(memcg, PGSCAN_DIRECT));
1470 seq_buf_printf(&s, "pgsteal %lu\n",
1471 memcg_events(memcg, PGSTEAL_KSWAPD) +
1472 memcg_events(memcg, PGSTEAL_DIRECT));
1473 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGACTIVATE),
1474 memcg_events(memcg, PGACTIVATE));
1475 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGDEACTIVATE),
1476 memcg_events(memcg, PGDEACTIVATE));
1477 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREE),
1478 memcg_events(memcg, PGLAZYFREE));
1479 seq_buf_printf(&s, "%s %lu\n", vm_event_name(PGLAZYFREED),
1480 memcg_events(memcg, PGLAZYFREED));
1482 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1483 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_FAULT_ALLOC),
1484 memcg_events(memcg, THP_FAULT_ALLOC));
1485 seq_buf_printf(&s, "%s %lu\n", vm_event_name(THP_COLLAPSE_ALLOC),
1486 memcg_events(memcg, THP_COLLAPSE_ALLOC));
1487 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1489 /* The above should easily fit into one page */
1490 WARN_ON_ONCE(seq_buf_has_overflowed(&s));
1495 #define K(x) ((x) << (PAGE_SHIFT-10))
1497 * mem_cgroup_print_oom_context: Print OOM information relevant to
1498 * memory controller.
1499 * @memcg: The memory cgroup that went over limit
1500 * @p: Task that is going to be killed
1502 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1505 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
1510 pr_cont(",oom_memcg=");
1511 pr_cont_cgroup_path(memcg->css.cgroup);
1513 pr_cont(",global_oom");
1515 pr_cont(",task_memcg=");
1516 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1522 * mem_cgroup_print_oom_meminfo: Print OOM memory information relevant to
1523 * memory controller.
1524 * @memcg: The memory cgroup that went over limit
1526 void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg)
1530 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1531 K((u64)page_counter_read(&memcg->memory)),
1532 K((u64)READ_ONCE(memcg->memory.max)), memcg->memory.failcnt);
1533 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
1534 pr_info("swap: usage %llukB, limit %llukB, failcnt %lu\n",
1535 K((u64)page_counter_read(&memcg->swap)),
1536 K((u64)READ_ONCE(memcg->swap.max)), memcg->swap.failcnt);
1538 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1539 K((u64)page_counter_read(&memcg->memsw)),
1540 K((u64)memcg->memsw.max), memcg->memsw.failcnt);
1541 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1542 K((u64)page_counter_read(&memcg->kmem)),
1543 K((u64)memcg->kmem.max), memcg->kmem.failcnt);
1546 pr_info("Memory cgroup stats for ");
1547 pr_cont_cgroup_path(memcg->css.cgroup);
1549 buf = memory_stat_format(memcg);
1557 * Return the memory (and swap, if configured) limit for a memcg.
1559 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
1561 unsigned long max = READ_ONCE(memcg->memory.max);
1563 if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
1564 if (mem_cgroup_swappiness(memcg))
1565 max += min(READ_ONCE(memcg->swap.max),
1566 (unsigned long)total_swap_pages);
1568 if (mem_cgroup_swappiness(memcg)) {
1569 /* Calculate swap excess capacity from memsw limit */
1570 unsigned long swap = READ_ONCE(memcg->memsw.max) - max;
1572 max += min(swap, (unsigned long)total_swap_pages);
1578 unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
1580 return page_counter_read(&memcg->memory);
1583 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1586 struct oom_control oc = {
1590 .gfp_mask = gfp_mask,
1595 if (mutex_lock_killable(&oom_lock))
1598 if (mem_cgroup_margin(memcg) >= (1 << order))
1602 * A few threads which were not waiting at mutex_lock_killable() can
1603 * fail to bail out. Therefore, check again after holding oom_lock.
1605 ret = should_force_charge() || out_of_memory(&oc);
1608 mutex_unlock(&oom_lock);
1612 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1615 unsigned long *total_scanned)
1617 struct mem_cgroup *victim = NULL;
1620 unsigned long excess;
1621 unsigned long nr_scanned;
1622 struct mem_cgroup_reclaim_cookie reclaim = {
1626 excess = soft_limit_excess(root_memcg);
1629 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1634 * If we have not been able to reclaim
1635 * anything, it might because there are
1636 * no reclaimable pages under this hierarchy
1641 * We want to do more targeted reclaim.
1642 * excess >> 2 is not to excessive so as to
1643 * reclaim too much, nor too less that we keep
1644 * coming back to reclaim from this cgroup
1646 if (total >= (excess >> 2) ||
1647 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1652 total += mem_cgroup_shrink_node(victim, gfp_mask, false,
1653 pgdat, &nr_scanned);
1654 *total_scanned += nr_scanned;
1655 if (!soft_limit_excess(root_memcg))
1658 mem_cgroup_iter_break(root_memcg, victim);
1662 #ifdef CONFIG_LOCKDEP
1663 static struct lockdep_map memcg_oom_lock_dep_map = {
1664 .name = "memcg_oom_lock",
1668 static DEFINE_SPINLOCK(memcg_oom_lock);
1671 * Check OOM-Killer is already running under our hierarchy.
1672 * If someone is running, return false.
1674 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1676 struct mem_cgroup *iter, *failed = NULL;
1678 spin_lock(&memcg_oom_lock);
1680 for_each_mem_cgroup_tree(iter, memcg) {
1681 if (iter->oom_lock) {
1683 * this subtree of our hierarchy is already locked
1684 * so we cannot give a lock.
1687 mem_cgroup_iter_break(memcg, iter);
1690 iter->oom_lock = true;
1695 * OK, we failed to lock the whole subtree so we have
1696 * to clean up what we set up to the failing subtree
1698 for_each_mem_cgroup_tree(iter, memcg) {
1699 if (iter == failed) {
1700 mem_cgroup_iter_break(memcg, iter);
1703 iter->oom_lock = false;
1706 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1708 spin_unlock(&memcg_oom_lock);
1713 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1715 struct mem_cgroup *iter;
1717 spin_lock(&memcg_oom_lock);
1718 mutex_release(&memcg_oom_lock_dep_map, _RET_IP_);
1719 for_each_mem_cgroup_tree(iter, memcg)
1720 iter->oom_lock = false;
1721 spin_unlock(&memcg_oom_lock);
1724 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1726 struct mem_cgroup *iter;
1728 spin_lock(&memcg_oom_lock);
1729 for_each_mem_cgroup_tree(iter, memcg)
1731 spin_unlock(&memcg_oom_lock);
1734 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1736 struct mem_cgroup *iter;
1739 * Be careful about under_oom underflows because a child memcg
1740 * could have been added after mem_cgroup_mark_under_oom.
1742 spin_lock(&memcg_oom_lock);
1743 for_each_mem_cgroup_tree(iter, memcg)
1744 if (iter->under_oom > 0)
1746 spin_unlock(&memcg_oom_lock);
1749 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1751 struct oom_wait_info {
1752 struct mem_cgroup *memcg;
1753 wait_queue_entry_t wait;
1756 static int memcg_oom_wake_function(wait_queue_entry_t *wait,
1757 unsigned mode, int sync, void *arg)
1759 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1760 struct mem_cgroup *oom_wait_memcg;
1761 struct oom_wait_info *oom_wait_info;
1763 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1764 oom_wait_memcg = oom_wait_info->memcg;
1766 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1767 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1769 return autoremove_wake_function(wait, mode, sync, arg);
1772 static void memcg_oom_recover(struct mem_cgroup *memcg)
1775 * For the following lockless ->under_oom test, the only required
1776 * guarantee is that it must see the state asserted by an OOM when
1777 * this function is called as a result of userland actions
1778 * triggered by the notification of the OOM. This is trivially
1779 * achieved by invoking mem_cgroup_mark_under_oom() before
1780 * triggering notification.
1782 if (memcg && memcg->under_oom)
1783 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1793 static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1795 enum oom_status ret;
1798 if (order > PAGE_ALLOC_COSTLY_ORDER)
1801 memcg_memory_event(memcg, MEMCG_OOM);
1804 * We are in the middle of the charge context here, so we
1805 * don't want to block when potentially sitting on a callstack
1806 * that holds all kinds of filesystem and mm locks.
1808 * cgroup1 allows disabling the OOM killer and waiting for outside
1809 * handling until the charge can succeed; remember the context and put
1810 * the task to sleep at the end of the page fault when all locks are
1813 * On the other hand, in-kernel OOM killer allows for an async victim
1814 * memory reclaim (oom_reaper) and that means that we are not solely
1815 * relying on the oom victim to make a forward progress and we can
1816 * invoke the oom killer here.
1818 * Please note that mem_cgroup_out_of_memory might fail to find a
1819 * victim and then we have to bail out from the charge path.
1821 if (memcg->oom_kill_disable) {
1822 if (!current->in_user_fault)
1824 css_get(&memcg->css);
1825 current->memcg_in_oom = memcg;
1826 current->memcg_oom_gfp_mask = mask;
1827 current->memcg_oom_order = order;
1832 mem_cgroup_mark_under_oom(memcg);
1834 locked = mem_cgroup_oom_trylock(memcg);
1837 mem_cgroup_oom_notify(memcg);
1839 mem_cgroup_unmark_under_oom(memcg);
1840 if (mem_cgroup_out_of_memory(memcg, mask, order))
1846 mem_cgroup_oom_unlock(memcg);
1852 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1853 * @handle: actually kill/wait or just clean up the OOM state
1855 * This has to be called at the end of a page fault if the memcg OOM
1856 * handler was enabled.
1858 * Memcg supports userspace OOM handling where failed allocations must
1859 * sleep on a waitqueue until the userspace task resolves the
1860 * situation. Sleeping directly in the charge context with all kinds
1861 * of locks held is not a good idea, instead we remember an OOM state
1862 * in the task and mem_cgroup_oom_synchronize() has to be called at
1863 * the end of the page fault to complete the OOM handling.
1865 * Returns %true if an ongoing memcg OOM situation was detected and
1866 * completed, %false otherwise.
1868 bool mem_cgroup_oom_synchronize(bool handle)
1870 struct mem_cgroup *memcg = current->memcg_in_oom;
1871 struct oom_wait_info owait;
1874 /* OOM is global, do not handle */
1881 owait.memcg = memcg;
1882 owait.wait.flags = 0;
1883 owait.wait.func = memcg_oom_wake_function;
1884 owait.wait.private = current;
1885 INIT_LIST_HEAD(&owait.wait.entry);
1887 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1888 mem_cgroup_mark_under_oom(memcg);
1890 locked = mem_cgroup_oom_trylock(memcg);
1893 mem_cgroup_oom_notify(memcg);
1895 if (locked && !memcg->oom_kill_disable) {
1896 mem_cgroup_unmark_under_oom(memcg);
1897 finish_wait(&memcg_oom_waitq, &owait.wait);
1898 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1899 current->memcg_oom_order);
1902 mem_cgroup_unmark_under_oom(memcg);
1903 finish_wait(&memcg_oom_waitq, &owait.wait);
1907 mem_cgroup_oom_unlock(memcg);
1909 * There is no guarantee that an OOM-lock contender
1910 * sees the wakeups triggered by the OOM kill
1911 * uncharges. Wake any sleepers explicitly.
1913 memcg_oom_recover(memcg);
1916 current->memcg_in_oom = NULL;
1917 css_put(&memcg->css);
1922 * mem_cgroup_get_oom_group - get a memory cgroup to clean up after OOM
1923 * @victim: task to be killed by the OOM killer
1924 * @oom_domain: memcg in case of memcg OOM, NULL in case of system-wide OOM
1926 * Returns a pointer to a memory cgroup, which has to be cleaned up
1927 * by killing all belonging OOM-killable tasks.
1929 * Caller has to call mem_cgroup_put() on the returned non-NULL memcg.
1931 struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
1932 struct mem_cgroup *oom_domain)
1934 struct mem_cgroup *oom_group = NULL;
1935 struct mem_cgroup *memcg;
1937 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
1941 oom_domain = root_mem_cgroup;
1945 memcg = mem_cgroup_from_task(victim);
1946 if (memcg == root_mem_cgroup)
1950 * If the victim task has been asynchronously moved to a different
1951 * memory cgroup, we might end up killing tasks outside oom_domain.
1952 * In this case it's better to ignore memory.group.oom.
1954 if (unlikely(!mem_cgroup_is_descendant(memcg, oom_domain)))
1958 * Traverse the memory cgroup hierarchy from the victim task's
1959 * cgroup up to the OOMing cgroup (or root) to find the
1960 * highest-level memory cgroup with oom.group set.
1962 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
1963 if (memcg->oom_group)
1966 if (memcg == oom_domain)
1971 css_get(&oom_group->css);
1978 void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
1980 pr_info("Tasks in ");
1981 pr_cont_cgroup_path(memcg->css.cgroup);
1982 pr_cont(" are going to be killed due to memory.oom.group set\n");
1986 * lock_page_memcg - lock a page and memcg binding
1989 * This function protects unlocked LRU pages from being moved to
1992 * It ensures lifetime of the locked memcg. Caller is responsible
1993 * for the lifetime of the page.
1995 void lock_page_memcg(struct page *page)
1997 struct page *head = compound_head(page); /* rmap on tail pages */
1998 struct mem_cgroup *memcg;
1999 unsigned long flags;
2002 * The RCU lock is held throughout the transaction. The fast
2003 * path can get away without acquiring the memcg->move_lock
2004 * because page moving starts with an RCU grace period.
2008 if (mem_cgroup_disabled())
2011 memcg = page_memcg(head);
2012 if (unlikely(!memcg))
2015 #ifdef CONFIG_PROVE_LOCKING
2016 local_irq_save(flags);
2017 might_lock(&memcg->move_lock);
2018 local_irq_restore(flags);
2021 if (atomic_read(&memcg->moving_account) <= 0)
2024 spin_lock_irqsave(&memcg->move_lock, flags);
2025 if (memcg != page_memcg(head)) {
2026 spin_unlock_irqrestore(&memcg->move_lock, flags);
2031 * When charge migration first begins, we can have multiple
2032 * critical sections holding the fast-path RCU lock and one
2033 * holding the slowpath move_lock. Track the task who has the
2034 * move_lock for unlock_page_memcg().
2036 memcg->move_lock_task = current;
2037 memcg->move_lock_flags = flags;
2039 EXPORT_SYMBOL(lock_page_memcg);
2041 static void __unlock_page_memcg(struct mem_cgroup *memcg)
2043 if (memcg && memcg->move_lock_task == current) {
2044 unsigned long flags = memcg->move_lock_flags;
2046 memcg->move_lock_task = NULL;
2047 memcg->move_lock_flags = 0;
2049 spin_unlock_irqrestore(&memcg->move_lock, flags);
2056 * unlock_page_memcg - unlock a page and memcg binding
2059 void unlock_page_memcg(struct page *page)
2061 struct page *head = compound_head(page);
2063 __unlock_page_memcg(page_memcg(head));
2065 EXPORT_SYMBOL(unlock_page_memcg);
2068 #ifdef CONFIG_MEMCG_KMEM
2069 struct obj_cgroup *cached_objcg;
2070 struct pglist_data *cached_pgdat;
2071 unsigned int nr_bytes;
2072 int nr_slab_reclaimable_b;
2073 int nr_slab_unreclaimable_b;
2079 struct memcg_stock_pcp {
2080 struct mem_cgroup *cached; /* this never be root cgroup */
2081 unsigned int nr_pages;
2082 struct obj_stock task_obj;
2083 struct obj_stock irq_obj;
2085 struct work_struct work;
2086 unsigned long flags;
2087 #define FLUSHING_CACHED_CHARGE 0
2089 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
2090 static DEFINE_MUTEX(percpu_charge_mutex);
2092 #ifdef CONFIG_MEMCG_KMEM
2093 static void drain_obj_stock(struct obj_stock *stock);
2094 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2095 struct mem_cgroup *root_memcg);
2098 static inline void drain_obj_stock(struct obj_stock *stock)
2101 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
2102 struct mem_cgroup *root_memcg)
2109 * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
2110 * sequence used in this case to access content from object stock is slow.
2111 * To optimize for user context access, there are now two object stocks for
2112 * task context and interrupt context access respectively.
2114 * The task context object stock can be accessed by disabling preemption only
2115 * which is cheap in non-preempt kernel. The interrupt context object stock
2116 * can only be accessed after disabling interrupt. User context code can
2117 * access interrupt object stock, but not vice versa.
2119 static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
2121 struct memcg_stock_pcp *stock;
2123 if (likely(in_task())) {
2126 stock = this_cpu_ptr(&memcg_stock);
2127 return &stock->task_obj;
2130 local_irq_save(*pflags);
2131 stock = this_cpu_ptr(&memcg_stock);
2132 return &stock->irq_obj;
2135 static inline void put_obj_stock(unsigned long flags)
2137 if (likely(in_task()))
2140 local_irq_restore(flags);
2144 * consume_stock: Try to consume stocked charge on this cpu.
2145 * @memcg: memcg to consume from.
2146 * @nr_pages: how many pages to charge.
2148 * The charges will only happen if @memcg matches the current cpu's memcg
2149 * stock, and at least @nr_pages are available in that stock. Failure to
2150 * service an allocation will refill the stock.
2152 * returns true if successful, false otherwise.
2154 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2156 struct memcg_stock_pcp *stock;
2157 unsigned long flags;
2160 if (nr_pages > MEMCG_CHARGE_BATCH)
2163 local_irq_save(flags);
2165 stock = this_cpu_ptr(&memcg_stock);
2166 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
2167 stock->nr_pages -= nr_pages;
2171 local_irq_restore(flags);
2177 * Returns stocks cached in percpu and reset cached information.
2179 static void drain_stock(struct memcg_stock_pcp *stock)
2181 struct mem_cgroup *old = stock->cached;
2186 if (stock->nr_pages) {
2187 page_counter_uncharge(&old->memory, stock->nr_pages);
2188 if (do_memsw_account())
2189 page_counter_uncharge(&old->memsw, stock->nr_pages);
2190 stock->nr_pages = 0;
2194 stock->cached = NULL;
2197 static void drain_local_stock(struct work_struct *dummy)
2199 struct memcg_stock_pcp *stock;
2200 unsigned long flags;
2203 * The only protection from memory hotplug vs. drain_stock races is
2204 * that we always operate on local CPU stock here with IRQ disabled
2206 local_irq_save(flags);
2208 stock = this_cpu_ptr(&memcg_stock);
2209 drain_obj_stock(&stock->irq_obj);
2211 drain_obj_stock(&stock->task_obj);
2213 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
2215 local_irq_restore(flags);
2219 * Cache charges(val) to local per_cpu area.
2220 * This will be consumed by consume_stock() function, later.
2222 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
2224 struct memcg_stock_pcp *stock;
2225 unsigned long flags;
2227 local_irq_save(flags);
2229 stock = this_cpu_ptr(&memcg_stock);
2230 if (stock->cached != memcg) { /* reset if necessary */
2232 css_get(&memcg->css);
2233 stock->cached = memcg;
2235 stock->nr_pages += nr_pages;
2237 if (stock->nr_pages > MEMCG_CHARGE_BATCH)
2240 local_irq_restore(flags);
2244 * Drains all per-CPU charge caches for given root_memcg resp. subtree
2245 * of the hierarchy under it.
2247 static void drain_all_stock(struct mem_cgroup *root_memcg)
2251 /* If someone's already draining, avoid adding running more workers. */
2252 if (!mutex_trylock(&percpu_charge_mutex))
2255 * Notify other cpus that system-wide "drain" is running
2256 * We do not care about races with the cpu hotplug because cpu down
2257 * as well as workers from this path always operate on the local
2258 * per-cpu data. CPU up doesn't touch memcg_stock at all.
2261 for_each_online_cpu(cpu) {
2262 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2263 struct mem_cgroup *memcg;
2267 memcg = stock->cached;
2268 if (memcg && stock->nr_pages &&
2269 mem_cgroup_is_descendant(memcg, root_memcg))
2271 if (obj_stock_flush_required(stock, root_memcg))
2276 !test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
2278 drain_local_stock(&stock->work);
2280 schedule_work_on(cpu, &stock->work);
2284 mutex_unlock(&percpu_charge_mutex);
2287 static void memcg_flush_lruvec_page_state(struct mem_cgroup *memcg, int cpu)
2291 for_each_node(nid) {
2292 struct mem_cgroup_per_node *pn = memcg->nodeinfo[nid];
2293 unsigned long stat[NR_VM_NODE_STAT_ITEMS];
2294 struct batched_lruvec_stat *lstatc;
2297 lstatc = per_cpu_ptr(pn->lruvec_stat_cpu, cpu);
2298 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
2299 stat[i] = lstatc->count[i];
2300 lstatc->count[i] = 0;
2304 for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
2305 atomic_long_add(stat[i], &pn->lruvec_stat[i]);
2306 } while ((pn = parent_nodeinfo(pn, nid)));
2310 static int memcg_hotplug_cpu_dead(unsigned int cpu)
2312 struct memcg_stock_pcp *stock;
2313 struct mem_cgroup *memcg;
2315 stock = &per_cpu(memcg_stock, cpu);
2318 for_each_mem_cgroup(memcg)
2319 memcg_flush_lruvec_page_state(memcg, cpu);
2324 static unsigned long reclaim_high(struct mem_cgroup *memcg,
2325 unsigned int nr_pages,
2328 unsigned long nr_reclaimed = 0;
2331 unsigned long pflags;
2333 if (page_counter_read(&memcg->memory) <=
2334 READ_ONCE(memcg->memory.high))
2337 memcg_memory_event(memcg, MEMCG_HIGH);
2339 psi_memstall_enter(&pflags);
2340 nr_reclaimed += try_to_free_mem_cgroup_pages(memcg, nr_pages,
2342 psi_memstall_leave(&pflags);
2343 } while ((memcg = parent_mem_cgroup(memcg)) &&
2344 !mem_cgroup_is_root(memcg));
2346 return nr_reclaimed;
2349 static void high_work_func(struct work_struct *work)
2351 struct mem_cgroup *memcg;
2353 memcg = container_of(work, struct mem_cgroup, high_work);
2354 reclaim_high(memcg, MEMCG_CHARGE_BATCH, GFP_KERNEL);
2358 * Clamp the maximum sleep time per allocation batch to 2 seconds. This is
2359 * enough to cause a significant slowdown in most cases, while still
2360 * allowing diagnostics and tracing to proceed without becoming stuck.
2362 #define MEMCG_MAX_HIGH_DELAY_JIFFIES (2UL*HZ)
2365 * When calculating the delay, we use these on either side of the exponentiation to
2366 * maintain precision and scale to a reasonable number of jiffies (see the table below).
2369 * - MEMCG_DELAY_PRECISION_SHIFT: Extra precision bits while translating the
2370 * overage ratio to a delay.
2371 * - MEMCG_DELAY_SCALING_SHIFT: The number of bits to scale down the
2372 * proposed penalty in order to reduce to a reasonable number of jiffies, and
2373 * to produce a reasonable delay curve.
2375 * MEMCG_DELAY_SCALING_SHIFT just happens to be a number that produces a
2376 * reasonable delay curve compared to precision-adjusted overage, not
2377 * penalising heavily at first, but still making sure that growth beyond the
2378 * limit penalises misbehaving cgroups by slowing them down exponentially. For
2379 * example, with a high of 100 megabytes:
2381 * +-------+------------------------+
2382 * | usage | time to allocate in ms |
2383 * +-------+------------------------+
2405 * +-------+------------------------+
2407 #define MEMCG_DELAY_PRECISION_SHIFT 20
2408 #define MEMCG_DELAY_SCALING_SHIFT 14
2410 static u64 calculate_overage(unsigned long usage, unsigned long high)
2418 * Prevent division by 0 in the overage calculation by acting as if
2419 * it were a threshold of 1 page.
2421 high = max(high, 1UL);
2423 overage = usage - high;
2424 overage <<= MEMCG_DELAY_PRECISION_SHIFT;
2425 return div64_u64(overage, high);
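/*
 * Worked example of the fixed-point arithmetic above (illustrative,
 * not part of the original source): with memory.high at 100MB (25600
 * 4K pages) and usage at 110MB (28160 pages),
 * overage = (2560 << 20) / 25600 = 104857, i.e. roughly 0.1 scaled by
 * 2^MEMCG_DELAY_PRECISION_SHIFT.
 */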
2428 static u64 mem_find_max_overage(struct mem_cgroup *memcg)
2430 u64 overage, max_overage = 0;
2433 overage = calculate_overage(page_counter_read(&memcg->memory),
2434 READ_ONCE(memcg->memory.high));
2435 max_overage = max(overage, max_overage);
2436 } while ((memcg = parent_mem_cgroup(memcg)) &&
2437 !mem_cgroup_is_root(memcg));
2442 static u64 swap_find_max_overage(struct mem_cgroup *memcg)
2444 u64 overage, max_overage = 0;
2447 overage = calculate_overage(page_counter_read(&memcg->swap),
2448 READ_ONCE(memcg->swap.high));
2450 memcg_memory_event(memcg, MEMCG_SWAP_HIGH);
2451 max_overage = max(overage, max_overage);
2452 } while ((memcg = parent_mem_cgroup(memcg)) &&
2453 !mem_cgroup_is_root(memcg));
2459 * Get the number of jiffies that we should penalise a mischievous cgroup which
2460 * is exceeding its memory.high by checking both it and its ancestors.
2462 static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
2463 unsigned int nr_pages,
2466 unsigned long penalty_jiffies;
2472 * We use overage compared to memory.high to calculate the number of
2473 * jiffies to sleep (penalty_jiffies). Ideally this value should be
2474 * fairly lenient on small overages, and increasingly harsh when the
2475 * memcg in question makes it clear that it has no intention of stopping
2476 * its crazy behaviour, so we exponentially increase the delay based on overage.
2479 penalty_jiffies = max_overage * max_overage * HZ;
2480 penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
2481 penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
2484 * Factor in the task's own contribution to the overage, such that four
2485 * N-sized allocations are throttled approximately the same as one
2486 * 4N-sized allocation.
2488 * MEMCG_CHARGE_BATCH pages is nominal, so work out how much smaller or
2489 * larger the current charge batch is than that.
2491 return penalty_jiffies * nr_pages / MEMCG_CHARGE_BATCH;
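/*
 * Continuing the worked example above (illustrative, assuming
 * HZ=1000): a 10% overage (104857 in fixed point) gives
 * penalty_jiffies = 104857 * 104857 * 1000 >> (20 + 14) ~= 640,
 * i.e. roughly 640ms of throttling for a full MEMCG_CHARGE_BATCH
 * worth of pages; a task that charged half a batch sleeps about
 * half that.
 */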
2495 * Scheduled by try_charge() and executed from the userland return path;
2496 * reclaims memory over the high limit.
2498 void mem_cgroup_handle_over_high(void)
2500 unsigned long penalty_jiffies;
2501 unsigned long pflags;
2502 unsigned long nr_reclaimed;
2503 unsigned int nr_pages = current->memcg_nr_pages_over_high;
2504 int nr_retries = MAX_RECLAIM_RETRIES;
2505 struct mem_cgroup *memcg;
2506 bool in_retry = false;
2508 if (likely(!nr_pages))
2511 memcg = get_mem_cgroup_from_mm(current->mm);
2512 current->memcg_nr_pages_over_high = 0;
2516 * The allocating task should reclaim at least the batch size, but for
2517 * subsequent retries we only want to do what's necessary to prevent oom
2518 * or breaching resource isolation.
2520 * This is distinct from memory.max or page allocator behaviour because
2521 * memory.high is currently batched, whereas memory.max and the page
2522 * allocator run every time an allocation is made.
2524 nr_reclaimed = reclaim_high(memcg,
2525 in_retry ? SWAP_CLUSTER_MAX : nr_pages,
2529 * memory.high is breached and reclaim is unable to keep up. Throttle
2530 * allocators proactively to slow down excessive growth.
2532 penalty_jiffies = calculate_high_delay(memcg, nr_pages,
2533 mem_find_max_overage(memcg));
2535 penalty_jiffies += calculate_high_delay(memcg, nr_pages,
2536 swap_find_max_overage(memcg));
2539 * Clamp the max delay per usermode return so as to still keep the
2540 * application moving forwards and also permit diagnostics, albeit extremely slowly.
2543 penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
2546 * Don't sleep if the amount of jiffies this memcg owes us is so low
2547 * that it's not even worth doing, in an attempt to be nice to those who
2548 * go only a small amount over their memory.high value and maybe haven't
2549 * been aggressively reclaimed enough yet.
2551 if (penalty_jiffies <= HZ / 100)
2555 * If reclaim is making forward progress but we're still over
2556 * memory.high, we want to encourage that rather than doing allocator throttling.
2559 if (nr_reclaimed || nr_retries--) {
2565 * If we exit early, we're guaranteed to die (since
2566 * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
2567 * need to account for any ill-begotten jiffies to pay them off later.
2569 psi_memstall_enter(&pflags);
2570 schedule_timeout_killable(penalty_jiffies);
2571 psi_memstall_leave(&pflags);
2574 css_put(&memcg->css);
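/*
 * End-to-end sketch (illustrative, not part of the original source):
 * a task that breaches memory.high in try_charge() only gets
 * current->memcg_nr_pages_over_high bumped and TIF_NOTIFY_RESUME set;
 * the actual reclaim and any penalty sleep happen here, on the way
 * back to userspace, where GFP_KERNEL reclaim is always safe.
 */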
2577 static int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
2578 unsigned int nr_pages)
2580 unsigned int batch = max(MEMCG_CHARGE_BATCH, nr_pages);
2581 int nr_retries = MAX_RECLAIM_RETRIES;
2582 struct mem_cgroup *mem_over_limit;
2583 struct page_counter *counter;
2584 enum oom_status oom_status;
2585 unsigned long nr_reclaimed;
2586 bool may_swap = true;
2587 bool drained = false;
2588 unsigned long pflags;
2591 if (consume_stock(memcg, nr_pages))
2594 if (!do_memsw_account() ||
2595 page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2596 if (page_counter_try_charge(&memcg->memory, batch, &counter))
2598 if (do_memsw_account())
2599 page_counter_uncharge(&memcg->memsw, batch);
2600 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2602 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2606 if (batch > nr_pages) {
2612 * Memcg doesn't have a dedicated reserve for atomic
2613 * allocations. But like the global atomic pool, we need to
2614 * put the burden of reclaim on regular allocation requests
2615 * and let these go through as privileged allocations.
2617 if (gfp_mask & __GFP_ATOMIC)
2621 * Unlike in global OOM situations, memcg is not in a physical
2622 * memory shortage. Allow dying and OOM-killed tasks to
2623 * bypass the last charges so that they can exit quickly and
2624 * free their memory.
2626 if (unlikely(should_force_charge()))
2630 * Prevent unbounded recursion when reclaim operations need to
2631 * allocate memory. This might exceed the limits temporarily,
2632 * but we prefer facilitating memory reclaim and getting back
2633 * under the limit over triggering OOM kills in these cases.
2635 if (unlikely(current->flags & PF_MEMALLOC))
2638 if (unlikely(task_in_memcg_oom(current)))
2641 if (!gfpflags_allow_blocking(gfp_mask))
2644 memcg_memory_event(mem_over_limit, MEMCG_MAX);
2646 psi_memstall_enter(&pflags);
2647 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2648 gfp_mask, may_swap);
2649 psi_memstall_leave(&pflags);
2651 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2655 drain_all_stock(mem_over_limit);
2660 if (gfp_mask & __GFP_NORETRY)
2663 * Even though the limit is exceeded at this point, reclaim
2664 * may have been able to free some pages. Retry the charge
2665 * before killing the task.
2667 * Only for regular pages, though: huge pages are rather
2668 * unlikely to succeed so close to the limit, and we fall back
2669 * to regular pages anyway in case of failure.
2671 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2674 * During task move, charges can be doubly counted. So it's
2675 * better to wait until the end of the task move if one is in progress.
2677 if (mem_cgroup_wait_acct_move(mem_over_limit))
2683 if (gfp_mask & __GFP_RETRY_MAYFAIL)
2686 if (fatal_signal_pending(current))
2690 * Keep retrying as long as the memcg OOM killer is able to make
2691 * forward progress, or bypass the charge if the OOM killer
2692 * couldn't make any progress.
2694 oom_status = mem_cgroup_oom(mem_over_limit, gfp_mask,
2695 get_order(nr_pages * PAGE_SIZE));
2696 switch (oom_status) {
2698 nr_retries = MAX_RECLAIM_RETRIES;
2706 if (!(gfp_mask & __GFP_NOFAIL))
2710 * The allocation either can't fail or will lead to more memory
2711 * being freed very soon. Allow memory usage go over the limit
2712 * temporarily by force charging it.
2714 page_counter_charge(&memcg->memory, nr_pages);
2715 if (do_memsw_account())
2716 page_counter_charge(&memcg->memsw, nr_pages);
2721 if (batch > nr_pages)
2722 refill_stock(memcg, batch - nr_pages);
2725 * If the hierarchy is above the normal consumption range, schedule
2726 * reclaim on returning to userland. We can perform reclaim here
2727 * if __GFP_RECLAIM is set, but let's always punt for simplicity and so that
2728 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2729 * not recorded as it most likely matches current's and won't
2730 * change in the meantime. As high limit is checked again before
2731 * reclaim, the cost of mismatch is negligible.
2734 bool mem_high, swap_high;
2736 mem_high = page_counter_read(&memcg->memory) >
2737 READ_ONCE(memcg->memory.high);
2738 swap_high = page_counter_read(&memcg->swap) >
2739 READ_ONCE(memcg->swap.high);
2741 /* Don't bother a random interrupted task */
2742 if (in_interrupt()) {
2744 schedule_work(&memcg->high_work);
2750 if (mem_high || swap_high) {
2752 * The allocating tasks in this cgroup will need to do
2753 * reclaim or be throttled to prevent further growth
2754 * of the memory or swap footprints.
2756 * Target some best-effort fairness between the tasks,
2757 * and distribute reclaim work and delay penalties
2758 * based on how much each task is actually allocating.
2760 current->memcg_nr_pages_over_high += batch;
2761 set_notify_resume(current);
2764 } while ((memcg = parent_mem_cgroup(memcg)));
2769 static inline int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2770 unsigned int nr_pages)
2772 if (mem_cgroup_is_root(memcg))
2775 return try_charge_memcg(memcg, gfp_mask, nr_pages);
2778 #if defined(CONFIG_MEMCG_KMEM) || defined(CONFIG_MMU)
2779 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2781 if (mem_cgroup_is_root(memcg))
2784 page_counter_uncharge(&memcg->memory, nr_pages);
2785 if (do_memsw_account())
2786 page_counter_uncharge(&memcg->memsw, nr_pages);
2790 static void commit_charge(struct page *page, struct mem_cgroup *memcg)
2792 VM_BUG_ON_PAGE(page_memcg(page), page);
2794 * Any of the following ensures page's memcg stability:
2798 * - lock_page_memcg()
2799 * - exclusive reference
2801 page->memcg_data = (unsigned long)memcg;
2804 static struct mem_cgroup *get_mem_cgroup_from_objcg(struct obj_cgroup *objcg)
2806 struct mem_cgroup *memcg;
2810 memcg = obj_cgroup_memcg(objcg);
2811 if (unlikely(!css_tryget(&memcg->css)))
2818 #ifdef CONFIG_MEMCG_KMEM
2820 * The allocated objcg pointers array is not accounted directly.
2821 * Moreover, it should not come from a DMA buffer and is not readily
2822 * reclaimable. So those GFP bits should be masked off.
2824 #define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
2826 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
2827 gfp_t gfp, bool new_page)
2829 unsigned int objects = objs_per_slab_page(s, page);
2830 unsigned long memcg_data;
2833 gfp &= ~OBJCGS_CLEAR_MASK;
2834 vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp,
2839 memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS;
2842 * If the slab page is brand new and nobody can yet access
2843 * its memcg_data, no synchronization is required and
2844 * memcg_data can be simply assigned.
2846 page->memcg_data = memcg_data;
2847 } else if (cmpxchg(&page->memcg_data, 0, memcg_data)) {
2849 * If the slab page is already in use, somebody can allocate
2850 * and assign obj_cgroups in parallel. In this case the existing
2851 * objcg vector should be reused.
2857 kmemleak_not_leak(vec);
2862 * Returns a pointer to the memory cgroup to which the kernel object is charged.
2864 * A passed kernel object can be a slab object or a generic kernel page, so
2865 * different mechanisms for getting the memory cgroup pointer should be used.
2866 * In certain cases (e.g. kernel stacks or large kmallocs with SLUB) the caller
2867 * cannot know for sure how the kernel object is implemented.
2868 * mem_cgroup_from_obj() can be safely used in such cases.
2870 * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
2871 * cgroup_mutex, etc.
2873 struct mem_cgroup *mem_cgroup_from_obj(void *p)
2877 if (mem_cgroup_disabled())
2880 page = virt_to_head_page(p);
2883 * Slab objects are accounted individually, not per-page.
2884 * Memcg membership data for each individual object is saved in
2885 * the page->obj_cgroups.
2887 if (page_objcgs_check(page)) {
2888 struct obj_cgroup *objcg;
2891 off = obj_to_index(page->slab_cache, page, p);
2892 objcg = page_objcgs(page)[off];
2894 return obj_cgroup_memcg(objcg);
2900 * page_memcg_check() is used here, because the page_objcgs_check()
2901 * call above could fail because the object cgroups vector wasn't set
2902 * at that moment, but it can be set concurrently.
2903 * page_memcg_check(page) will guarantee that a proper memory
2904 * cgroup pointer or NULL will be returned.
2906 return page_memcg_check(page);
2909 __always_inline struct obj_cgroup *get_obj_cgroup_from_current(void)
2911 struct obj_cgroup *objcg = NULL;
2912 struct mem_cgroup *memcg;
2914 if (memcg_kmem_bypass())
2918 if (unlikely(active_memcg()))
2919 memcg = active_memcg();
2921 memcg = mem_cgroup_from_task(current);
2923 for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
2924 objcg = rcu_dereference(memcg->objcg);
2925 if (objcg && obj_cgroup_tryget(objcg))
2934 static int memcg_alloc_cache_id(void)
2939 id = ida_simple_get(&memcg_cache_ida,
2940 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2944 if (id < memcg_nr_cache_ids)
2948 * There's no space for the new id in memcg_caches arrays,
2949 * so we have to grow them.
2951 down_write(&memcg_cache_ids_sem);
2953 size = 2 * (id + 1);
2954 if (size < MEMCG_CACHES_MIN_SIZE)
2955 size = MEMCG_CACHES_MIN_SIZE;
2956 else if (size > MEMCG_CACHES_MAX_SIZE)
2957 size = MEMCG_CACHES_MAX_SIZE;
2959 err = memcg_update_all_list_lrus(size);
2961 memcg_nr_cache_ids = size;
2963 up_write(&memcg_cache_ids_sem);
2966 ida_simple_remove(&memcg_cache_ida, id);
2972 static void memcg_free_cache_id(int id)
2974 ida_simple_remove(&memcg_cache_ida, id);
2978 * obj_cgroup_uncharge_pages: uncharge a number of kernel pages from an objcg
2979 * @objcg: object cgroup to uncharge
2980 * @nr_pages: number of pages to uncharge
2982 static void obj_cgroup_uncharge_pages(struct obj_cgroup *objcg,
2983 unsigned int nr_pages)
2985 struct mem_cgroup *memcg;
2987 memcg = get_mem_cgroup_from_objcg(objcg);
2989 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
2990 page_counter_uncharge(&memcg->kmem, nr_pages);
2991 refill_stock(memcg, nr_pages);
2993 css_put(&memcg->css);
2997 * obj_cgroup_charge_pages: charge a number of kernel pages to an objcg
2998 * @objcg: object cgroup to charge
2999 * @gfp: reclaim mode
3000 * @nr_pages: number of pages to charge
3002 * Returns 0 on success, an error code on failure.
3004 static int obj_cgroup_charge_pages(struct obj_cgroup *objcg, gfp_t gfp,
3005 unsigned int nr_pages)
3007 struct page_counter *counter;
3008 struct mem_cgroup *memcg;
3011 memcg = get_mem_cgroup_from_objcg(objcg);
3013 ret = try_charge_memcg(memcg, gfp, nr_pages);
3017 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
3018 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
3021 * Enforce __GFP_NOFAIL allocation because callers are not
3022 * prepared to see failures and likely do not have any failure handling code.
3025 if (gfp & __GFP_NOFAIL) {
3026 page_counter_charge(&memcg->kmem, nr_pages);
3029 cancel_charge(memcg, nr_pages);
3033 css_put(&memcg->css);
3039 * __memcg_kmem_charge_page: charge a kmem page to the current memory cgroup
3040 * @page: page to charge
3041 * @gfp: reclaim mode
3042 * @order: allocation order
3044 * Returns 0 on success, an error code on failure.
3046 int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
3048 struct obj_cgroup *objcg;
3051 objcg = get_obj_cgroup_from_current();
3053 ret = obj_cgroup_charge_pages(objcg, gfp, 1 << order);
3055 page->memcg_data = (unsigned long)objcg |
3059 obj_cgroup_put(objcg);
3065 * __memcg_kmem_uncharge_page: uncharge a kmem page
3066 * @page: page to uncharge
3067 * @order: allocation order
3069 void __memcg_kmem_uncharge_page(struct page *page, int order)
3071 struct obj_cgroup *objcg;
3072 unsigned int nr_pages = 1 << order;
3074 if (!PageMemcgKmem(page))
3077 objcg = __page_objcg(page);
3078 obj_cgroup_uncharge_pages(objcg, nr_pages);
3079 page->memcg_data = 0;
3080 obj_cgroup_put(objcg);
3083 void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
3084 enum node_stat_item idx, int nr)
3086 unsigned long flags;
3087 struct obj_stock *stock = get_obj_stock(&flags);
3091 * Save vmstat data in stock and skip vmstat array update unless
3092 * accumulating over a page of vmstat data, or when pgdat or idx changes.
3095 if (stock->cached_objcg != objcg) {
3096 drain_obj_stock(stock);
3097 obj_cgroup_get(objcg);
3098 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3099 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3100 stock->cached_objcg = objcg;
3101 stock->cached_pgdat = pgdat;
3102 } else if (stock->cached_pgdat != pgdat) {
3103 /* Flush the existing cached vmstat data */
3104 if (stock->nr_slab_reclaimable_b) {
3105 mod_objcg_mlstate(objcg, pgdat, NR_SLAB_RECLAIMABLE_B,
3106 stock->nr_slab_reclaimable_b);
3107 stock->nr_slab_reclaimable_b = 0;
3109 if (stock->nr_slab_unreclaimable_b) {
3110 mod_objcg_mlstate(objcg, pgdat, NR_SLAB_UNRECLAIMABLE_B,
3111 stock->nr_slab_unreclaimable_b);
3112 stock->nr_slab_unreclaimable_b = 0;
3114 stock->cached_pgdat = pgdat;
3117 bytes = (idx == NR_SLAB_RECLAIMABLE_B) ? &stock->nr_slab_reclaimable_b
3118 : &stock->nr_slab_unreclaimable_b;
3120 * Even for a large object >= PAGE_SIZE, the vmstat data will still be
3121 * cached locally at least once before pushing it out.
3128 if (abs(*bytes) > PAGE_SIZE) {
3136 mod_objcg_mlstate(objcg, pgdat, idx, nr);
3138 put_obj_stock(flags);
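/*
 * Illustrative example (not part of the original source): a stream of
 * 64-byte reclaimable slab objects charged on one pgdat only bumps
 * stock->nr_slab_reclaimable_b; the atomic lruvec counters are updated
 * once the cached delta exceeds PAGE_SIZE (or when the objcg or pgdat
 * changes), batching up to 4096 / 64 = 64 updates into one on a
 * 4K-page system.
 */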
3141 static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
3143 unsigned long flags;
3144 struct obj_stock *stock = get_obj_stock(&flags);
3147 if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
3148 stock->nr_bytes -= nr_bytes;
3152 put_obj_stock(flags);
3157 static void drain_obj_stock(struct obj_stock *stock)
3159 struct obj_cgroup *old = stock->cached_objcg;
3164 if (stock->nr_bytes) {
3165 unsigned int nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3166 unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
3169 obj_cgroup_uncharge_pages(old, nr_pages);
3172 * The leftover is flushed to the centralized per-memcg value.
3173 * On the next attempt to refill obj stock it will be moved
3174 * to a per-cpu stock (probably on another CPU); see
3175 * refill_obj_stock().
3177 * How often it's flushed is a trade-off between the memory
3178 * limit enforcement accuracy and potential CPU contention,
3179 * so it might be changed in the future.
3181 atomic_add(nr_bytes, &old->nr_charged_bytes);
3182 stock->nr_bytes = 0;
3186 * Flush the vmstat data in the current stock.
3188 if (stock->nr_slab_reclaimable_b || stock->nr_slab_unreclaimable_b) {
3189 if (stock->nr_slab_reclaimable_b) {
3190 mod_objcg_mlstate(old, stock->cached_pgdat,
3191 NR_SLAB_RECLAIMABLE_B,
3192 stock->nr_slab_reclaimable_b);
3193 stock->nr_slab_reclaimable_b = 0;
3195 if (stock->nr_slab_unreclaimable_b) {
3196 mod_objcg_mlstate(old, stock->cached_pgdat,
3197 NR_SLAB_UNRECLAIMABLE_B,
3198 stock->nr_slab_unreclaimable_b);
3199 stock->nr_slab_unreclaimable_b = 0;
3201 stock->cached_pgdat = NULL;
3204 obj_cgroup_put(old);
3205 stock->cached_objcg = NULL;
3208 static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
3209 struct mem_cgroup *root_memcg)
3211 struct mem_cgroup *memcg;
3213 if (in_task() && stock->task_obj.cached_objcg) {
3214 memcg = obj_cgroup_memcg(stock->task_obj.cached_objcg);
3215 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3218 if (stock->irq_obj.cached_objcg) {
3219 memcg = obj_cgroup_memcg(stock->irq_obj.cached_objcg);
3220 if (memcg && mem_cgroup_is_descendant(memcg, root_memcg))
3227 static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes,
3228 bool allow_uncharge)
3230 unsigned long flags;
3231 struct obj_stock *stock = get_obj_stock(&flags);
3232 unsigned int nr_pages = 0;
3234 if (stock->cached_objcg != objcg) { /* reset if necessary */
3235 drain_obj_stock(stock);
3236 obj_cgroup_get(objcg);
3237 stock->cached_objcg = objcg;
3238 stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes)
3239 ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0;
3240 allow_uncharge = true; /* Allow uncharge when objcg changes */
3242 stock->nr_bytes += nr_bytes;
3244 if (allow_uncharge && (stock->nr_bytes > PAGE_SIZE)) {
3245 nr_pages = stock->nr_bytes >> PAGE_SHIFT;
3246 stock->nr_bytes &= (PAGE_SIZE - 1);
3249 put_obj_stock(flags);
3252 obj_cgroup_uncharge_pages(objcg, nr_pages);
3255 int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
3257 unsigned int nr_pages, nr_bytes;
3260 if (consume_obj_stock(objcg, size))
3264 * In theory, objcg->nr_charged_bytes can have enough
3265 * pre-charged bytes to satisfy the allocation. However,
3266 * flushing objcg->nr_charged_bytes requires two atomic
3267 * operations, and objcg->nr_charged_bytes can't be big.
3268 * The shared objcg->nr_charged_bytes can also become a
3269 * performance bottleneck if all tasks of the same memcg are
3270 * trying to update it. So it's better to ignore it and try to
3271 * grab some new pages. The stock's nr_bytes will be flushed to
3272 * objcg->nr_charged_bytes later on when objcg changes.
3274 * The stock's nr_bytes may contain enough pre-charged bytes
3275 * to allow one less page to be charged, but we can't rely
3276 * on the pre-charged bytes not being changed outside of
3277 * consume_obj_stock() or refill_obj_stock(). So ignore those
3278 * pre-charged bytes as well when charging pages. To avoid a
3279 * page uncharge right after a page charge, we set the
3280 * allow_uncharge flag to false when calling refill_obj_stock()
3281 * to temporarily allow the pre-charged bytes to exceed the page
3282 * size limit. The maximum reachable value of the pre-charged
3283 * bytes is (sizeof(object) + PAGE_SIZE - 2) if there is no data race.
3286 nr_pages = size >> PAGE_SHIFT;
3287 nr_bytes = size & (PAGE_SIZE - 1);
3292 ret = obj_cgroup_charge_pages(objcg, gfp, nr_pages);
3293 if (!ret && nr_bytes)
3294 refill_obj_stock(objcg, PAGE_SIZE - nr_bytes, false);
3299 void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
3301 refill_obj_stock(objcg, size, true);
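/*
 * Worked example for the charge path above (illustrative, assuming 4K
 * pages): an accounted 700-byte allocation that misses
 * consume_obj_stock() computes nr_pages = 0 and nr_bytes = 700, rounds
 * the charge up to one page, and pre-charges the remaining
 * 4096 - 700 = 3396 bytes into the per-cpu obj stock. A subsequent
 * 500-byte charge on the same CPU is then served entirely from the
 * stock.
 */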
3304 #endif /* CONFIG_MEMCG_KMEM */
3307 * Because page_memcg(head) is not set on tails, set it now.
3309 void split_page_memcg(struct page *head, unsigned int nr)
3311 struct mem_cgroup *memcg = page_memcg(head);
3314 if (mem_cgroup_disabled() || !memcg)
3317 for (i = 1; i < nr; i++)
3318 head[i].memcg_data = head->memcg_data;
3320 if (PageMemcgKmem(head))
3321 obj_cgroup_get_many(__page_objcg(head), nr - 1);
3323 css_get_many(&memcg->css, nr - 1);
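/*
 * Illustrative example (not part of the original source): splitting an
 * order-2 compound page copies head->memcg_data to its 3 tail pages,
 * so every resulting base page reports the same memcg (or objcg for
 * kmem pages); the reference counts are bumped by nr - 1 = 3 to match.
 */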
3326 #ifdef CONFIG_MEMCG_SWAP
3328 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
3329 * @entry: swap entry to be moved
3330 * @from: mem_cgroup which the entry is moved from
3331 * @to: mem_cgroup which the entry is moved to
3333 * It succeeds only when the swap_cgroup's record for this entry is the same
3334 * as the mem_cgroup's id of @from.
3336 * Returns 0 on success, -EINVAL on failure.
3338 * The caller must have charged to @to, IOW, called page_counter_charge() for
3339 * both res and memsw, and called css_get().
3341 static int mem_cgroup_move_swap_account(swp_entry_t entry,
3342 struct mem_cgroup *from, struct mem_cgroup *to)
3344 unsigned short old_id, new_id;
3346 old_id = mem_cgroup_id(from);
3347 new_id = mem_cgroup_id(to);
3349 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
3350 mod_memcg_state(from, MEMCG_SWAP, -1);
3351 mod_memcg_state(to, MEMCG_SWAP, 1);
3357 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3358 struct mem_cgroup *from, struct mem_cgroup *to)
3364 static DEFINE_MUTEX(memcg_max_mutex);
3366 static int mem_cgroup_resize_max(struct mem_cgroup *memcg,
3367 unsigned long max, bool memsw)
3369 bool enlarge = false;
3370 bool drained = false;
3372 bool limits_invariant;
3373 struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
3376 if (signal_pending(current)) {
3381 mutex_lock(&memcg_max_mutex);
3383 * Make sure that the new limit (memsw or memory limit) doesn't
3384 * break our basic invariant, memory.max <= memsw.max.
3386 limits_invariant = memsw ? max >= READ_ONCE(memcg->memory.max) :
3387 max <= memcg->memsw.max;
3388 if (!limits_invariant) {
3389 mutex_unlock(&memcg_max_mutex);
3393 if (max > counter->max)
3395 ret = page_counter_set_max(counter, max);
3396 mutex_unlock(&memcg_max_mutex);
3402 drain_all_stock(memcg);
3407 if (!try_to_free_mem_cgroup_pages(memcg, 1,
3408 GFP_KERNEL, !memsw)) {
3414 if (!ret && enlarge)
3415 memcg_oom_recover(memcg);
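/*
 * Example of the invariant above (illustrative): with
 * memory.limit_in_bytes set to 1G, an attempt to set
 * memsw.limit_in_bytes to 512M is rejected with -EINVAL, since that
 * would leave memsw.max below memory.max; the memory limit has to be
 * lowered first.
 */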
3420 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3422 unsigned long *total_scanned)
3424 unsigned long nr_reclaimed = 0;
3425 struct mem_cgroup_per_node *mz, *next_mz = NULL;
3426 unsigned long reclaimed;
3428 struct mem_cgroup_tree_per_node *mctz;
3429 unsigned long excess;
3430 unsigned long nr_scanned;
3435 mctz = soft_limit_tree_node(pgdat->node_id);
3438 * Do not even bother to check the largest node if the root
3439 * is empty. Do it lockless to prevent lock bouncing. Races
3440 * are acceptable as soft limit is best effort anyway.
3442 if (!mctz || RB_EMPTY_ROOT(&mctz->rb_root))
3446 * This loop can run for a while, especially if mem_cgroups continuously
3447 * keep exceeding their soft limit and putting the system under
3454 mz = mem_cgroup_largest_soft_limit_node(mctz);
3459 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3460 gfp_mask, &nr_scanned);
3461 nr_reclaimed += reclaimed;
3462 *total_scanned += nr_scanned;
3463 spin_lock_irq(&mctz->lock);
3464 __mem_cgroup_remove_exceeded(mz, mctz);
3467 * If we failed to reclaim anything from this memory cgroup
3468 * it is time to move on to the next cgroup
3472 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
3474 excess = soft_limit_excess(mz->memcg);
3476 * One school of thought says that we should not add
3477 * back the node to the tree if reclaim returns 0.
3478 * But our reclaim could return 0 simply because, due
3479 * to priority, we are exposing a smaller subset of
3480 * memory to reclaim from. Consider this as a longer term TODO.
3483 /* If excess == 0, no tree ops */
3484 __mem_cgroup_insert_exceeded(mz, mctz, excess);
3485 spin_unlock_irq(&mctz->lock);
3486 css_put(&mz->memcg->css);
3489 * Could not reclaim anything and there are no more
3490 * mem cgroups to try or we seem to be looping without
3491 * reclaiming anything.
3493 if (!nr_reclaimed &&
3495 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
3497 } while (!nr_reclaimed);
3499 css_put(&next_mz->memcg->css);
3500 return nr_reclaimed;
3504 * Reclaims as many pages from the given memcg as possible.
3506 * Caller is responsible for holding css reference for memcg.
3508 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
3510 int nr_retries = MAX_RECLAIM_RETRIES;
3512 /* we call try-to-free pages to make this cgroup empty */
3513 lru_add_drain_all();
3515 drain_all_stock(memcg);
3517 /* try to free all pages in this cgroup */
3518 while (nr_retries && page_counter_read(&memcg->memory)) {
3521 if (signal_pending(current))
3524 progress = try_to_free_mem_cgroup_pages(memcg, 1,
3528 /* maybe some writeback is necessary */
3529 congestion_wait(BLK_RW_ASYNC, HZ/10);
3537 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
3538 char *buf, size_t nbytes,
3541 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3543 if (mem_cgroup_is_root(memcg))
3545 return mem_cgroup_force_empty(memcg) ?: nbytes;
3548 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
3554 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
3555 struct cftype *cft, u64 val)
3560 pr_warn_once("Non-hierarchical mode is deprecated. "
3561 "Please report your usecase to linux-mm@kvack.org if you "
3562 "depend on this functionality.\n");
3567 static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
3571 if (mem_cgroup_is_root(memcg)) {
3572 cgroup_rstat_flush(memcg->css.cgroup);
3573 val = memcg_page_state(memcg, NR_FILE_PAGES) +
3574 memcg_page_state(memcg, NR_ANON_MAPPED);
3576 val += memcg_page_state(memcg, MEMCG_SWAP);
3579 val = page_counter_read(&memcg->memory);
3581 val = page_counter_read(&memcg->memsw);
3594 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
3597 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3598 struct page_counter *counter;
3600 switch (MEMFILE_TYPE(cft->private)) {
3602 counter = &memcg->memory;
3605 counter = &memcg->memsw;
3608 counter = &memcg->kmem;
3611 counter = &memcg->tcpmem;
3617 switch (MEMFILE_ATTR(cft->private)) {
3619 if (counter == &memcg->memory)
3620 return (u64)mem_cgroup_usage(memcg, false) * PAGE_SIZE;
3621 if (counter == &memcg->memsw)
3622 return (u64)mem_cgroup_usage(memcg, true) * PAGE_SIZE;
3623 return (u64)page_counter_read(counter) * PAGE_SIZE;
3625 return (u64)counter->max * PAGE_SIZE;
3627 return (u64)counter->watermark * PAGE_SIZE;
3629 return counter->failcnt;
3630 case RES_SOFT_LIMIT:
3631 return (u64)memcg->soft_limit * PAGE_SIZE;
3637 #ifdef CONFIG_MEMCG_KMEM
3638 static int memcg_online_kmem(struct mem_cgroup *memcg)
3640 struct obj_cgroup *objcg;
3643 if (cgroup_memory_nokmem)
3646 BUG_ON(memcg->kmemcg_id >= 0);
3647 BUG_ON(memcg->kmem_state);
3649 memcg_id = memcg_alloc_cache_id();
3653 objcg = obj_cgroup_alloc();
3655 memcg_free_cache_id(memcg_id);
3658 objcg->memcg = memcg;
3659 rcu_assign_pointer(memcg->objcg, objcg);
3661 static_branch_enable(&memcg_kmem_enabled_key);
3663 memcg->kmemcg_id = memcg_id;
3664 memcg->kmem_state = KMEM_ONLINE;
3669 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3671 struct cgroup_subsys_state *css;
3672 struct mem_cgroup *parent, *child;
3675 if (memcg->kmem_state != KMEM_ONLINE)
3678 memcg->kmem_state = KMEM_ALLOCATED;
3680 parent = parent_mem_cgroup(memcg);
3682 parent = root_mem_cgroup;
3684 memcg_reparent_objcgs(memcg, parent);
3686 kmemcg_id = memcg->kmemcg_id;
3687 BUG_ON(kmemcg_id < 0);
3690 * Change kmemcg_id of this cgroup and all its descendants to the
3691 * parent's id, and then move all entries from this cgroup's list_lrus
3692 * to ones of the parent. After we have finished, all list_lrus
3693 * corresponding to this cgroup are guaranteed to remain empty. The
3694 * ordering is imposed by list_lru_node->lock taken by
3695 * memcg_drain_all_list_lrus().
3697 rcu_read_lock(); /* can be called from css_free w/o cgroup_mutex */
3698 css_for_each_descendant_pre(css, &memcg->css) {
3699 child = mem_cgroup_from_css(css);
3700 BUG_ON(child->kmemcg_id != kmemcg_id);
3701 child->kmemcg_id = parent->kmemcg_id;
3705 memcg_drain_all_list_lrus(kmemcg_id, parent);
3707 memcg_free_cache_id(kmemcg_id);
3710 static void memcg_free_kmem(struct mem_cgroup *memcg)
3712 /* css_alloc() failed, offlining didn't happen */
3713 if (unlikely(memcg->kmem_state == KMEM_ONLINE))
3714 memcg_offline_kmem(memcg);
3717 static int memcg_online_kmem(struct mem_cgroup *memcg)
3721 static void memcg_offline_kmem(struct mem_cgroup *memcg)
3724 static void memcg_free_kmem(struct mem_cgroup *memcg)
3727 #endif /* CONFIG_MEMCG_KMEM */
3729 static int memcg_update_kmem_max(struct mem_cgroup *memcg,
3734 mutex_lock(&memcg_max_mutex);
3735 ret = page_counter_set_max(&memcg->kmem, max);
3736 mutex_unlock(&memcg_max_mutex);
3740 static int memcg_update_tcp_max(struct mem_cgroup *memcg, unsigned long max)
3744 mutex_lock(&memcg_max_mutex);
3746 ret = page_counter_set_max(&memcg->tcpmem, max);
3750 if (!memcg->tcpmem_active) {
3752 * The active flag needs to be written after the static_key
3753 * update. This is what guarantees that the socket activation
3754 * function is the last one to run. See mem_cgroup_sk_alloc()
3755 * for details, and note that we don't mark any socket as
3756 * belonging to this memcg until that flag is up.
3758 * We need to do this, because static_keys will span multiple
3759 * sites, but we can't control their order. If we mark a socket
3760 * as accounted, but the accounting functions are not patched in
3761 * yet, we'll lose accounting.
3763 * We never race with the readers in mem_cgroup_sk_alloc(),
3764 * because when this value changes, the code to process it is not
3767 static_branch_inc(&memcg_sockets_enabled_key);
3768 memcg->tcpmem_active = true;
3771 mutex_unlock(&memcg_max_mutex);
3776 * The user of this function is...
3779 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3780 char *buf, size_t nbytes, loff_t off)
3782 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3783 unsigned long nr_pages;
3786 buf = strstrip(buf);
3787 ret = page_counter_memparse(buf, "-1", &nr_pages);
3791 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3793 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3797 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3799 ret = mem_cgroup_resize_max(memcg, nr_pages, false);
3802 ret = mem_cgroup_resize_max(memcg, nr_pages, true);
3805 pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. "
3806 "Please report your usecase to linux-mm@kvack.org if you "
3807 "depend on this functionality.\n");
3808 ret = memcg_update_kmem_max(memcg, nr_pages);
3811 ret = memcg_update_tcp_max(memcg, nr_pages);
3815 case RES_SOFT_LIMIT:
3816 memcg->soft_limit = nr_pages;
3820 return ret ?: nbytes;
3823 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3824 size_t nbytes, loff_t off)
3826 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3827 struct page_counter *counter;
3829 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3831 counter = &memcg->memory;
3834 counter = &memcg->memsw;
3837 counter = &memcg->kmem;
3840 counter = &memcg->tcpmem;
3846 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3848 page_counter_reset_watermark(counter);
3851 counter->failcnt = 0;
3860 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3863 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3867 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3868 struct cftype *cft, u64 val)
3870 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3872 if (val & ~MOVE_MASK)
3876 * No locking is needed here, because ->can_attach() will
3877 * check this value once at the beginning of the process, and then carry
3878 * on with stale data. This means that changes to this value will only
3879 * affect task migrations starting after the change.
3881 memcg->move_charge_at_immigrate = val;
3885 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3886 struct cftype *cft, u64 val)
3894 #define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
3895 #define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
3896 #define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
3898 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
3899 int nid, unsigned int lru_mask, bool tree)
3901 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
3902 unsigned long nr = 0;
3905 VM_BUG_ON((unsigned)nid >= nr_node_ids);
3908 if (!(BIT(lru) & lru_mask))
3911 nr += lruvec_page_state(lruvec, NR_LRU_BASE + lru);
3913 nr += lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
3918 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
3919 unsigned int lru_mask,
3922 unsigned long nr = 0;
3926 if (!(BIT(lru) & lru_mask))
3929 nr += memcg_page_state(memcg, NR_LRU_BASE + lru);
3931 nr += memcg_page_state_local(memcg, NR_LRU_BASE + lru);
3936 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3940 unsigned int lru_mask;
3943 static const struct numa_stat stats[] = {
3944 { "total", LRU_ALL },
3945 { "file", LRU_ALL_FILE },
3946 { "anon", LRU_ALL_ANON },
3947 { "unevictable", BIT(LRU_UNEVICTABLE) },
3949 const struct numa_stat *stat;
3951 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
3953 cgroup_rstat_flush(memcg->css.cgroup);
3955 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3956 seq_printf(m, "%s=%lu", stat->name,
3957 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3959 for_each_node_state(nid, N_MEMORY)
3960 seq_printf(m, " N%d=%lu", nid,
3961 mem_cgroup_node_nr_lru_pages(memcg, nid,
3962 stat->lru_mask, false));
3966 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3968 seq_printf(m, "hierarchical_%s=%lu", stat->name,
3969 mem_cgroup_nr_lru_pages(memcg, stat->lru_mask,
3971 for_each_node_state(nid, N_MEMORY)
3972 seq_printf(m, " N%d=%lu", nid,
3973 mem_cgroup_node_nr_lru_pages(memcg, nid,
3974 stat->lru_mask, true));
3980 #endif /* CONFIG_NUMA */
3982 static const unsigned int memcg1_stats[] = {
3985 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
3995 static const char *const memcg1_stat_names[] = {
3998 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4008 /* Universal VM events cgroup1 shows, original sort order */
4009 static const unsigned int memcg1_events[] = {
4016 static int memcg_stat_show(struct seq_file *m, void *v)
4018 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
4019 unsigned long memory, memsw;
4020 struct mem_cgroup *mi;
4023 BUILD_BUG_ON(ARRAY_SIZE(memcg1_stat_names) != ARRAY_SIZE(memcg1_stats));
4025 cgroup_rstat_flush(memcg->css.cgroup);
4027 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4030 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4032 nr = memcg_page_state_local(memcg, memcg1_stats[i]);
4033 seq_printf(m, "%s %lu\n", memcg1_stat_names[i], nr * PAGE_SIZE);
4036 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4037 seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]),
4038 memcg_events_local(memcg, memcg1_events[i]));
4040 for (i = 0; i < NR_LRU_LISTS; i++)
4041 seq_printf(m, "%s %lu\n", lru_list_name(i),
4042 memcg_page_state_local(memcg, NR_LRU_BASE + i) *
4045 /* Hierarchical information */
4046 memory = memsw = PAGE_COUNTER_MAX;
4047 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
4048 memory = min(memory, READ_ONCE(mi->memory.max));
4049 memsw = min(memsw, READ_ONCE(mi->memsw.max));
4051 seq_printf(m, "hierarchical_memory_limit %llu\n",
4052 (u64)memory * PAGE_SIZE);
4053 if (do_memsw_account())
4054 seq_printf(m, "hierarchical_memsw_limit %llu\n",
4055 (u64)memsw * PAGE_SIZE);
4057 for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) {
4060 if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
4062 nr = memcg_page_state(memcg, memcg1_stats[i]);
4063 seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i],
4064 (u64)nr * PAGE_SIZE);
4067 for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
4068 seq_printf(m, "total_%s %llu\n",
4069 vm_event_name(memcg1_events[i]),
4070 (u64)memcg_events(memcg, memcg1_events[i]));
4072 for (i = 0; i < NR_LRU_LISTS; i++)
4073 seq_printf(m, "total_%s %llu\n", lru_list_name(i),
4074 (u64)memcg_page_state(memcg, NR_LRU_BASE + i) *
4077 #ifdef CONFIG_DEBUG_VM
4080 struct mem_cgroup_per_node *mz;
4081 unsigned long anon_cost = 0;
4082 unsigned long file_cost = 0;
4084 for_each_online_pgdat(pgdat) {
4085 mz = memcg->nodeinfo[pgdat->node_id];
4087 anon_cost += mz->lruvec.anon_cost;
4088 file_cost += mz->lruvec.file_cost;
4090 seq_printf(m, "anon_cost %lu\n", anon_cost);
4091 seq_printf(m, "file_cost %lu\n", file_cost);
4098 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
4101 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4103 return mem_cgroup_swappiness(memcg);
4106 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
4107 struct cftype *cft, u64 val)
4109 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4114 if (!mem_cgroup_is_root(memcg))
4115 memcg->swappiness = val;
4117 vm_swappiness = val;
4122 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4124 struct mem_cgroup_threshold_ary *t;
4125 unsigned long usage;
4130 t = rcu_dereference(memcg->thresholds.primary);
4132 t = rcu_dereference(memcg->memsw_thresholds.primary);
4137 usage = mem_cgroup_usage(memcg, swap);
4140 * current_threshold points to the threshold just below or equal to usage.
4141 * If that's not true, a threshold was crossed after the last
4142 * call of __mem_cgroup_threshold().
4144 i = t->current_threshold;
4147 * Iterate backward over array of thresholds starting from
4148 * current_threshold and check if a threshold is crossed.
4149 * If none of thresholds below usage is crossed, we read
4150 * only one element of the array here.
4152 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
4153 eventfd_signal(t->entries[i].eventfd, 1);
4155 /* i = current_threshold + 1 */
4159 * Iterate forward over array of thresholds starting from
4160 * current_threshold+1 and check if a threshold is crossed.
4161 * If none of thresholds above usage is crossed, we read
4162 * only one element of the array here.
4164 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
4165 eventfd_signal(t->entries[i].eventfd, 1);
4167 /* Update current_threshold */
4168 t->current_threshold = i - 1;
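/*
 * Illustrative walk-through (not part of the original source): with
 * registered thresholds {4M, 8M, 12M} and usage at 9M,
 * current_threshold sits on the 8M entry. If usage grows to 13M, the
 * forward scan signals the 12M eventfd and leaves current_threshold
 * there; if usage instead drops to 3M, the backward scan signals 8M
 * and then 4M, and current_threshold ends up at -1.
 */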
4173 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
4176 __mem_cgroup_threshold(memcg, false);
4177 if (do_memsw_account())
4178 __mem_cgroup_threshold(memcg, true);
4180 memcg = parent_mem_cgroup(memcg);
4184 static int compare_thresholds(const void *a, const void *b)
4186 const struct mem_cgroup_threshold *_a = a;
4187 const struct mem_cgroup_threshold *_b = b;
4189 if (_a->threshold > _b->threshold)
4192 if (_a->threshold < _b->threshold)
4198 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4200 struct mem_cgroup_eventfd_list *ev;
4202 spin_lock(&memcg_oom_lock);
4204 list_for_each_entry(ev, &memcg->oom_notify, list)
4205 eventfd_signal(ev->eventfd, 1);
4207 spin_unlock(&memcg_oom_lock);
4211 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
4213 struct mem_cgroup *iter;
4215 for_each_mem_cgroup_tree(iter, memcg)
4216 mem_cgroup_oom_notify_cb(iter);
4219 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4220 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
4222 struct mem_cgroup_thresholds *thresholds;
4223 struct mem_cgroup_threshold_ary *new;
4224 unsigned long threshold;
4225 unsigned long usage;
4228 ret = page_counter_memparse(args, "-1", &threshold);
4232 mutex_lock(&memcg->thresholds_lock);
4235 thresholds = &memcg->thresholds;
4236 usage = mem_cgroup_usage(memcg, false);
4237 } else if (type == _MEMSWAP) {
4238 thresholds = &memcg->memsw_thresholds;
4239 usage = mem_cgroup_usage(memcg, true);
4243 /* Check if a threshold was crossed before adding a new one */
4244 if (thresholds->primary)
4245 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4247 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
4249 /* Allocate memory for new array of thresholds */
4250 new = kmalloc(struct_size(new, entries, size), GFP_KERNEL);
4257 /* Copy thresholds (if any) to new array */
4258 if (thresholds->primary)
4259 memcpy(new->entries, thresholds->primary->entries,
4260 flex_array_size(new, entries, size - 1));
4262 /* Add new threshold */
4263 new->entries[size - 1].eventfd = eventfd;
4264 new->entries[size - 1].threshold = threshold;
4266 /* Sort thresholds. Registering a new threshold isn't time-critical */
4267 sort(new->entries, size, sizeof(*new->entries),
4268 compare_thresholds, NULL);
4270 /* Find current threshold */
4271 new->current_threshold = -1;
4272 for (i = 0; i < size; i++) {
4273 if (new->entries[i].threshold <= usage) {
4275 * new->current_threshold will not be used until
4276 * rcu_assign_pointer(), so it's safe to increment it here.
4279 ++new->current_threshold;
4284 /* Free old spare buffer and save old primary buffer as spare */
4285 kfree(thresholds->spare);
4286 thresholds->spare = thresholds->primary;
4288 rcu_assign_pointer(thresholds->primary, new);
4290 /* To be sure that nobody uses thresholds */
4294 mutex_unlock(&memcg->thresholds_lock);
4299 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
4300 struct eventfd_ctx *eventfd, const char *args)
4302 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
4305 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
4306 struct eventfd_ctx *eventfd, const char *args)
4308 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
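/*
 * Userspace usage sketch for the registration above (illustrative,
 * cgroup v1 interface): open an eventfd and the cgroup's
 * memory.usage_in_bytes file, then write
 * "<eventfd> <usage_fd> <threshold>" to that cgroup's
 * cgroup.event_control. The eventfd is signalled whenever usage
 * crosses the registered threshold in either direction.
 */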
4311 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4312 struct eventfd_ctx *eventfd, enum res_type type)
4314 struct mem_cgroup_thresholds *thresholds;
4315 struct mem_cgroup_threshold_ary *new;
4316 unsigned long usage;
4317 int i, j, size, entries;
4319 mutex_lock(&memcg->thresholds_lock);
4322 thresholds = &memcg->thresholds;
4323 usage = mem_cgroup_usage(memcg, false);
4324 } else if (type == _MEMSWAP) {
4325 thresholds = &memcg->memsw_thresholds;
4326 usage = mem_cgroup_usage(memcg, true);
4330 if (!thresholds->primary)
4333 /* Check if a threshold was crossed before removing */
4334 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
4336 /* Calculate the new number of thresholds */
4338 for (i = 0; i < thresholds->primary->size; i++) {
4339 if (thresholds->primary->entries[i].eventfd != eventfd)
4345 new = thresholds->spare;
4347 /* If no items related to eventfd have been cleared, nothing to do */
4351 /* Set thresholds array to NULL if we don't have thresholds */
4360 /* Copy thresholds and find current threshold */
4361 new->current_threshold = -1;
4362 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
4363 if (thresholds->primary->entries[i].eventfd == eventfd)
4366 new->entries[j] = thresholds->primary->entries[i];
4367 if (new->entries[j].threshold <= usage) {
4369 * new->current_threshold will not be used
4370 * until rcu_assign_pointer(), so it's safe to increment it here.
4373 ++new->current_threshold;
4379 /* Swap primary and spare array */
4380 thresholds->spare = thresholds->primary;
4382 rcu_assign_pointer(thresholds->primary, new);
4384 /* To be sure that nobody uses thresholds */
4387 /* If all events are unregistered, free the spare array */
4389 kfree(thresholds->spare);
4390 thresholds->spare = NULL;
4393 mutex_unlock(&memcg->thresholds_lock);
4396 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4397 struct eventfd_ctx *eventfd)
4399 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
4402 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
4403 struct eventfd_ctx *eventfd)
4405 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
4408 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
4409 struct eventfd_ctx *eventfd, const char *args)
4411 struct mem_cgroup_eventfd_list *event;
4413 event = kmalloc(sizeof(*event), GFP_KERNEL);
4417 spin_lock(&memcg_oom_lock);
4419 event->eventfd = eventfd;
4420 list_add(&event->list, &memcg->oom_notify);
4422 /* already in OOM? */
4423 if (memcg->under_oom)
4424 eventfd_signal(eventfd, 1);
4425 spin_unlock(&memcg_oom_lock);
4430 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
4431 struct eventfd_ctx *eventfd)
4433 struct mem_cgroup_eventfd_list *ev, *tmp;
4435 spin_lock(&memcg_oom_lock);
4437 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
4438 if (ev->eventfd == eventfd) {
4439 list_del(&ev->list);
4444 spin_unlock(&memcg_oom_lock);
4447 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
4449 struct mem_cgroup *memcg = mem_cgroup_from_seq(sf);
4451 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
4452 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
4453 seq_printf(sf, "oom_kill %lu\n",
4454 atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL]));
4458 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
4459 struct cftype *cft, u64 val)
4461 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4463 /* cannot set to root cgroup and only 0 and 1 are allowed */
4464 if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1)))
4467 memcg->oom_kill_disable = val;
4469 memcg_oom_recover(memcg);
4474 #ifdef CONFIG_CGROUP_WRITEBACK
4476 #include <trace/events/writeback.h>
4478 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4480 return wb_domain_init(&memcg->cgwb_domain, gfp);
4483 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4485 wb_domain_exit(&memcg->cgwb_domain);
4488 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4490 wb_domain_size_changed(&memcg->cgwb_domain);
4493 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
4495 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4497 if (!memcg->css.parent)
4500 return &memcg->cgwb_domain;
4504 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
4505 * @wb: bdi_writeback in question
4506 * @pfilepages: out parameter for number of file pages
4507 * @pheadroom: out parameter for number of allocatable pages according to memcg
4508 * @pdirty: out parameter for number of dirty pages
4509 * @pwriteback: out parameter for number of pages under writeback
4511 * Determine the numbers of file, headroom, dirty, and writeback pages in
4512 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
4513 * is a bit more involved.
4515 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
4516 * headroom is calculated as the lowest headroom of itself and the
4517 * ancestors. Note that this doesn't consider the actual amount of
4518 * available memory in the system. The caller should further cap
4519 * *@pheadroom accordingly.
4521 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
4522 unsigned long *pheadroom, unsigned long *pdirty,
4523 unsigned long *pwriteback)
4525 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4526 struct mem_cgroup *parent;
4528 cgroup_rstat_flush_irqsafe(memcg->css.cgroup);
4530 *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
4531 *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
4532 *pfilepages = memcg_page_state(memcg, NR_INACTIVE_FILE) +
4533 memcg_page_state(memcg, NR_ACTIVE_FILE);
4535 *pheadroom = PAGE_COUNTER_MAX;
4536 while ((parent = parent_mem_cgroup(memcg))) {
4537 unsigned long ceiling = min(READ_ONCE(memcg->memory.max),
4538 READ_ONCE(memcg->memory.high));
4539 unsigned long used = page_counter_read(&memcg->memory);
4541 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
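/*
 * Worked example (illustrative, not part of the original source): a
 * memcg with memory.high=200M and 150M used has 50M of local headroom;
 * if its parent's ceiling is 1G with 990M used, the parent allows only
 * 10M more, so *pheadroom ends up at 10M. The min(ceiling, used) clamp
 * keeps an over-limit level from underflowing into a huge bogus
 * headroom.
 */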
4547 * Foreign dirty flushing
4549 * There's an inherent mismatch between memcg and writeback. The former
4550 * tracks ownership per-page while the latter per-inode. This was a
4551 * deliberate design decision because honoring per-page ownership in the
4552 * writeback path is complicated, may lead to higher CPU and IO overheads
4553 * and deemed unnecessary given that write-sharing an inode across
4554 * different cgroups isn't a common use-case.
4556 * Combined with inode majority-writer ownership switching, this works well
4557 * enough in most cases but there are some pathological cases. For
4558 * example, let's say there are two cgroups A and B which keep writing to
4559 * different but confined parts of the same inode. B owns the inode and
4560 * A's memory is limited far below B's. A's dirty ratio can rise enough to
4561 * trigger balance_dirty_pages() sleeps but B's can be low enough to avoid
4562 * triggering background writeback. A will be slowed down without a way to
4563 * make writeback of the dirty pages happen.
4565 * Conditions like the above can lead to a cgroup getting repeatedly and
4566 * severely throttled after making some progress after each
4567 * dirty_expire_interval while the underlying IO device is almost
4570 * Solving this problem completely requires matching the ownership tracking
4571 * granularities between memcg and writeback in either direction. However,
4572 * the more egregious behaviors can be avoided by simply remembering the
4573 * most recent foreign dirtying events and initiating remote flushes on
4574 * them when local writeback isn't enough to keep the memory clean enough.
4576 * The following two functions implement such mechanism. When a foreign
4577 * page - a page whose memcg and writeback ownerships don't match - is
4578 * dirtied, mem_cgroup_track_foreign_dirty() records the inode owning
4579 * bdi_writeback on the page owning memcg. When balance_dirty_pages()
4580 * decides that the memcg needs to sleep due to high dirty ratio, it calls
4581 * mem_cgroup_flush_foreign() which queues writeback on the recorded
4582 * foreign bdi_writebacks which haven't expired. Both the numbers of
4583 * recorded bdi_writebacks and concurrent in-flight foreign writebacks are
4584 * limited to MEMCG_CGWB_FRN_CNT.
4586 * The mechanism only remembers IDs and doesn't hold any object references.
4587 * As being wrong occasionally doesn't matter, updates and accesses to the
4588 * records are lockless and racy.
4590 void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
4591 struct bdi_writeback *wb)
4593 struct mem_cgroup *memcg = page_memcg(page);
4594 struct memcg_cgwb_frn *frn;
4595 u64 now = get_jiffies_64();
4596 u64 oldest_at = now;
4600 trace_track_foreign_dirty(page, wb);
4603 * Pick the slot to use. If there is already a slot for @wb, keep
4604 * using it. If not, replace the oldest one which isn't being written out.
4607 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4608 frn = &memcg->cgwb_frn[i];
4609 if (frn->bdi_id == wb->bdi->id &&
4610 frn->memcg_id == wb->memcg_css->id)
4612 if (time_before64(frn->at, oldest_at) &&
4613 atomic_read(&frn->done.cnt) == 1) {
4615 oldest_at = frn->at;
4619 if (i < MEMCG_CGWB_FRN_CNT) {
4621 * Re-using an existing one. Update timestamp lazily to
4622 * avoid making the cacheline hot. We want them to be
4623 * reasonably up-to-date and significantly shorter than
4624 * dirty_expire_interval as that's what expires the record.
4625 * Use the shorter of 1s and dirty_expire_interval / 8.
4627 unsigned long update_intv =
4628 min_t(unsigned long, HZ,
4629 msecs_to_jiffies(dirty_expire_interval * 10) / 8);
4631 if (time_before64(frn->at, now - update_intv))
4633 } else if (oldest >= 0) {
4634 /* replace the oldest free one */
4635 frn = &memcg->cgwb_frn[oldest];
4636 frn->bdi_id = wb->bdi->id;
4637 frn->memcg_id = wb->memcg_css->id;
4642 /* issue foreign writeback flushes for recorded foreign dirtying events */
4643 void mem_cgroup_flush_foreign(struct bdi_writeback *wb)
4645 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
4646 unsigned long intv = msecs_to_jiffies(dirty_expire_interval * 10);
4647 u64 now = jiffies_64;
4650 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++) {
4651 struct memcg_cgwb_frn *frn = &memcg->cgwb_frn[i];
4654 * If the record is older than dirty_expire_interval,
4655 * writeback on it has already started. No need to kick it
4656 * off again. Also, don't start a new one if there's
4657 * already one in flight.
4659 if (time_after64(frn->at, now - intv) &&
4660 atomic_read(&frn->done.cnt) == 1) {
4662 trace_flush_foreign(wb, frn->bdi_id, frn->memcg_id);
4663 cgroup_writeback_by_id(frn->bdi_id, frn->memcg_id, 0,
4664 WB_REASON_FOREIGN_FLUSH,
4670 #else /* CONFIG_CGROUP_WRITEBACK */
4672 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
4677 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
4681 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
4685 #endif /* CONFIG_CGROUP_WRITEBACK */
4688 * DO NOT USE IN NEW FILES.
4690 * "cgroup.event_control" implementation.
4692 * This is way over-engineered. It tries to support fully configurable
 * events for each user. Such a level of flexibility is completely
 * unnecessary, especially in light of the planned unified hierarchy.
 * Please deprecate this and replace with something simpler if at all
 * possible.
4701 * Unregister event and free resources.
4703 * Gets called from workqueue.
4705 static void memcg_event_remove(struct work_struct *work)
4707 struct mem_cgroup_event *event =
4708 container_of(work, struct mem_cgroup_event, remove);
4709 struct mem_cgroup *memcg = event->memcg;
4711 remove_wait_queue(event->wqh, &event->wait);
4713 event->unregister_event(memcg, event->eventfd);
4715 /* Notify userspace the event is going away. */
4716 eventfd_signal(event->eventfd, 1);
4718 eventfd_ctx_put(event->eventfd);
4720 css_put(&memcg->css);
4724 * Gets called on EPOLLHUP on eventfd when user closes it.
4726 * Called with wqh->lock held and interrupts disabled.
4728 static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode,
4729 int sync, void *key)
4731 struct mem_cgroup_event *event =
4732 container_of(wait, struct mem_cgroup_event, wait);
4733 struct mem_cgroup *memcg = event->memcg;
4734 __poll_t flags = key_to_poll(key);
4736 if (flags & EPOLLHUP) {
 * If the event has been detached at cgroup removal, we
 * can simply return knowing the other side will clean up
 * for us.
 *
 * We can't race against event freeing since the other
 * side will require wqh->lock via remove_wait_queue(),
 * which we hold.
4746 spin_lock(&memcg->event_list_lock);
4747 if (!list_empty(&event->list)) {
4748 list_del_init(&event->list);
 * We are in atomic context, but memcg_event_remove()
 * may sleep, so we have to call it from a workqueue.
4753 schedule_work(&event->remove);
4755 spin_unlock(&memcg->event_list_lock);
4761 static void memcg_event_ptable_queue_proc(struct file *file,
4762 wait_queue_head_t *wqh, poll_table *pt)
4764 struct mem_cgroup_event *event =
4765 container_of(pt, struct mem_cgroup_event, pt);
4768 add_wait_queue(wqh, &event->wait);
4772 * DO NOT USE IN NEW FILES.
4774 * Parse input and register new cgroup event handler.
4776 * Input must be in format '<event_fd> <control_fd> <args>'.
4777 * Interpretation of args is defined by control file implementation.
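/*
 * A minimal userspace sketch of this legacy interface (an illustrative
 * assumption, not kernel code; the mount point and group name are
 * hypothetical). It arms a usage threshold and waits for it to fire:
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/cgroup/memory/grp/memory.usage_in_bytes", O_RDONLY);
 *	int ctl = open("/cgroup/memory/grp/cgroup.event_control", O_WRONLY);
 *	char buf[32];
 *	uint64_t cnt;
 *
 *	snprintf(buf, sizeof(buf), "%d %d %llu", efd, cfd, 64ULL << 20);
 *	write(ctl, buf, strlen(buf));
 *	read(efd, &cnt, sizeof(cnt));	blocks until usage crosses 64M
 */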
4779 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
4780 char *buf, size_t nbytes, loff_t off)
4782 struct cgroup_subsys_state *css = of_css(of);
4783 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4784 struct mem_cgroup_event *event;
4785 struct cgroup_subsys_state *cfile_css;
4786 unsigned int efd, cfd;
4793 buf = strstrip(buf);
4795 efd = simple_strtoul(buf, &endp, 10);
4800 cfd = simple_strtoul(buf, &endp, 10);
4801 if ((*endp != ' ') && (*endp != '\0'))
4805 event = kzalloc(sizeof(*event), GFP_KERNEL);
4809 event->memcg = memcg;
4810 INIT_LIST_HEAD(&event->list);
4811 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
4812 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
4813 INIT_WORK(&event->remove, memcg_event_remove);
4821 event->eventfd = eventfd_ctx_fileget(efile.file);
4822 if (IS_ERR(event->eventfd)) {
4823 ret = PTR_ERR(event->eventfd);
4830 goto out_put_eventfd;
	/* the process needs read permission on the control file */
4834 /* AV: shouldn't we check that it's been opened for read instead? */
4835 ret = file_permission(cfile.file, MAY_READ);
4840 * Determine the event callbacks and set them in @event. This used
4841 * to be done via struct cftype but cgroup core no longer knows
4842 * about these events. The following is crude but the whole thing
4843 * is for compatibility anyway.
4845 * DO NOT ADD NEW FILES.
4847 name = cfile.file->f_path.dentry->d_name.name;
4849 if (!strcmp(name, "memory.usage_in_bytes")) {
4850 event->register_event = mem_cgroup_usage_register_event;
4851 event->unregister_event = mem_cgroup_usage_unregister_event;
4852 } else if (!strcmp(name, "memory.oom_control")) {
4853 event->register_event = mem_cgroup_oom_register_event;
4854 event->unregister_event = mem_cgroup_oom_unregister_event;
4855 } else if (!strcmp(name, "memory.pressure_level")) {
4856 event->register_event = vmpressure_register_event;
4857 event->unregister_event = vmpressure_unregister_event;
4858 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
4859 event->register_event = memsw_cgroup_usage_register_event;
4860 event->unregister_event = memsw_cgroup_usage_unregister_event;
 * Verify that @cfile belongs to @css. Also, remaining events are
4868 * automatically removed on cgroup destruction but the removal is
4869 * asynchronous, so take an extra ref on @css.
4871 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4872 &memory_cgrp_subsys);
4874 if (IS_ERR(cfile_css))
4876 if (cfile_css != css) {
4881 ret = event->register_event(memcg, event->eventfd, buf);
4885 vfs_poll(efile.file, &event->pt);
4887 spin_lock(&memcg->event_list_lock);
4888 list_add(&event->list, &memcg->event_list);
4889 spin_unlock(&memcg->event_list_lock);
4901 eventfd_ctx_put(event->eventfd);
4910 static struct cftype mem_cgroup_legacy_files[] = {
4912 .name = "usage_in_bytes",
4913 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4914 .read_u64 = mem_cgroup_read_u64,
4917 .name = "max_usage_in_bytes",
4918 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4919 .write = mem_cgroup_reset,
4920 .read_u64 = mem_cgroup_read_u64,
4923 .name = "limit_in_bytes",
4924 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4925 .write = mem_cgroup_write,
4926 .read_u64 = mem_cgroup_read_u64,
4929 .name = "soft_limit_in_bytes",
4930 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4931 .write = mem_cgroup_write,
4932 .read_u64 = mem_cgroup_read_u64,
4936 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4937 .write = mem_cgroup_reset,
4938 .read_u64 = mem_cgroup_read_u64,
4942 .seq_show = memcg_stat_show,
4945 .name = "force_empty",
4946 .write = mem_cgroup_force_empty_write,
4949 .name = "use_hierarchy",
4950 .write_u64 = mem_cgroup_hierarchy_write,
4951 .read_u64 = mem_cgroup_hierarchy_read,
4954 .name = "cgroup.event_control", /* XXX: for compat */
4955 .write = memcg_write_event_control,
4956 .flags = CFTYPE_NO_PREFIX | CFTYPE_WORLD_WRITABLE,
4959 .name = "swappiness",
4960 .read_u64 = mem_cgroup_swappiness_read,
4961 .write_u64 = mem_cgroup_swappiness_write,
4964 .name = "move_charge_at_immigrate",
4965 .read_u64 = mem_cgroup_move_charge_read,
4966 .write_u64 = mem_cgroup_move_charge_write,
4969 .name = "oom_control",
4970 .seq_show = mem_cgroup_oom_control_read,
4971 .write_u64 = mem_cgroup_oom_control_write,
4972 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4975 .name = "pressure_level",
4979 .name = "numa_stat",
4980 .seq_show = memcg_numa_stat_show,
4984 .name = "kmem.limit_in_bytes",
4985 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4986 .write = mem_cgroup_write,
4987 .read_u64 = mem_cgroup_read_u64,
4990 .name = "kmem.usage_in_bytes",
4991 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4992 .read_u64 = mem_cgroup_read_u64,
4995 .name = "kmem.failcnt",
4996 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4997 .write = mem_cgroup_reset,
4998 .read_u64 = mem_cgroup_read_u64,
5001 .name = "kmem.max_usage_in_bytes",
5002 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
5003 .write = mem_cgroup_reset,
5004 .read_u64 = mem_cgroup_read_u64,
5006 #if defined(CONFIG_MEMCG_KMEM) && \
5007 (defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG))
5009 .name = "kmem.slabinfo",
5010 .seq_show = memcg_slab_show,
5014 .name = "kmem.tcp.limit_in_bytes",
5015 .private = MEMFILE_PRIVATE(_TCP, RES_LIMIT),
5016 .write = mem_cgroup_write,
5017 .read_u64 = mem_cgroup_read_u64,
5020 .name = "kmem.tcp.usage_in_bytes",
5021 .private = MEMFILE_PRIVATE(_TCP, RES_USAGE),
5022 .read_u64 = mem_cgroup_read_u64,
5025 .name = "kmem.tcp.failcnt",
5026 .private = MEMFILE_PRIVATE(_TCP, RES_FAILCNT),
5027 .write = mem_cgroup_reset,
5028 .read_u64 = mem_cgroup_read_u64,
5031 .name = "kmem.tcp.max_usage_in_bytes",
5032 .private = MEMFILE_PRIVATE(_TCP, RES_MAX_USAGE),
5033 .write = mem_cgroup_reset,
5034 .read_u64 = mem_cgroup_read_u64,
5036 { }, /* terminate */
5040 * Private memory cgroup IDR
5042 * Swap-out records and page cache shadow entries need to store memcg
5043 * references in constrained space, so we maintain an ID space that is
5044 * limited to 16 bit (MEM_CGROUP_ID_MAX), limiting the total number of
5045 * memory-controlled cgroups to 64k.
5047 * However, there usually are many references to the offline CSS after
5048 * the cgroup has been destroyed, such as page cache or reclaimable
5049 * slab objects, that don't need to hang on to the ID. We want to keep
5050 * those dead CSS from occupying IDs, or we might quickly exhaust the
5051 * relatively small ID space and prevent the creation of new cgroups
5052 * even when there are much fewer than 64k cgroups - possibly none.
5054 * Maintain a private 16-bit ID space for memcg, and allow the ID to
5055 * be freed and recycled when it's no longer needed, which is usually
5056 * when the CSS is offlined.
5058 * The only exception to that are records of swapped out tmpfs/shmem
5059 * pages that need to be attributed to live ancestors on swapin. But
5060 * those references are manageable from userspace.
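/*
 * A minimal sketch of how such a constrained reference is used, e.g. by
 * the swap records, which store nothing but the 16-bit id:
 *
 *	unsigned short id = mem_cgroup_id(memcg);	at swap-out time
 *	...
 *	rcu_read_lock();				at swap-in time
 *	memcg = mem_cgroup_from_id(id);
 *	if (memcg && !css_tryget_online(&memcg->css))
 *		memcg = NULL;
 *	rcu_read_unlock();
 */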
5063 static DEFINE_IDR(mem_cgroup_idr);
5065 static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
5067 if (memcg->id.id > 0) {
5068 idr_remove(&mem_cgroup_idr, memcg->id.id);
5073 static void __maybe_unused mem_cgroup_id_get_many(struct mem_cgroup *memcg,
5076 refcount_add(n, &memcg->id.ref);
5079 static void mem_cgroup_id_put_many(struct mem_cgroup *memcg, unsigned int n)
5081 if (refcount_sub_and_test(n, &memcg->id.ref)) {
5082 mem_cgroup_id_remove(memcg);
5084 /* Memcg ID pins CSS */
5085 css_put(&memcg->css);
5089 static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
5091 mem_cgroup_id_put_many(memcg, 1);
5095 * mem_cgroup_from_id - look up a memcg from a memcg id
5096 * @id: the memcg id to look up
5098 * Caller must hold rcu_read_lock().
5100 struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
5102 WARN_ON_ONCE(!rcu_read_lock_held());
5103 return idr_find(&mem_cgroup_idr, id);
5106 static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5108 struct mem_cgroup_per_node *pn;
 * This routine is called against possible nodes.
 * But it's a BUG to call kmalloc() against an offline node.
 *
 * TODO: this routine can waste much memory for nodes which will
 * never be onlined. It's better to use a memory hotplug callback
 * function.
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
5124 pn->lruvec_stat_local = alloc_percpu_gfp(struct lruvec_stat,
5125 GFP_KERNEL_ACCOUNT);
5126 if (!pn->lruvec_stat_local) {
5131 pn->lruvec_stat_cpu = alloc_percpu_gfp(struct batched_lruvec_stat,
5132 GFP_KERNEL_ACCOUNT);
5133 if (!pn->lruvec_stat_cpu) {
5134 free_percpu(pn->lruvec_stat_local);
5139 lruvec_init(&pn->lruvec);
5140 pn->usage_in_excess = 0;
5141 pn->on_tree = false;
5144 memcg->nodeinfo[node] = pn;
5148 static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
5150 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
5155 free_percpu(pn->lruvec_stat_cpu);
5156 free_percpu(pn->lruvec_stat_local);
5160 static void __mem_cgroup_free(struct mem_cgroup *memcg)
5165 free_mem_cgroup_per_node_info(memcg, node);
5166 free_percpu(memcg->vmstats_percpu);
5170 static void mem_cgroup_free(struct mem_cgroup *memcg)
5174 memcg_wb_domain_exit(memcg);
 * Flush percpu lruvec stats to guarantee that the values are
 * correct at the parent and all ancestor levels.
5179 for_each_online_cpu(cpu)
5180 memcg_flush_lruvec_page_state(memcg, cpu);
5181 __mem_cgroup_free(memcg);
5184 static struct mem_cgroup *mem_cgroup_alloc(void)
5186 struct mem_cgroup *memcg;
5189 int __maybe_unused i;
5190 long error = -ENOMEM;
5192 size = sizeof(struct mem_cgroup);
5193 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
5195 memcg = kzalloc(size, GFP_KERNEL);
5197 return ERR_PTR(error);
5199 memcg->id.id = idr_alloc(&mem_cgroup_idr, NULL,
5200 1, MEM_CGROUP_ID_MAX,
5202 if (memcg->id.id < 0) {
5203 error = memcg->id.id;
5207 memcg->vmstats_percpu = alloc_percpu_gfp(struct memcg_vmstats_percpu,
5208 GFP_KERNEL_ACCOUNT);
5209 if (!memcg->vmstats_percpu)
5213 if (alloc_mem_cgroup_per_node_info(memcg, node))
5216 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
5219 INIT_WORK(&memcg->high_work, high_work_func);
5220 INIT_LIST_HEAD(&memcg->oom_notify);
5221 mutex_init(&memcg->thresholds_lock);
5222 spin_lock_init(&memcg->move_lock);
5223 vmpressure_init(&memcg->vmpressure);
5224 INIT_LIST_HEAD(&memcg->event_list);
5225 spin_lock_init(&memcg->event_list_lock);
5226 memcg->socket_pressure = jiffies;
5227 #ifdef CONFIG_MEMCG_KMEM
5228 memcg->kmemcg_id = -1;
5229 INIT_LIST_HEAD(&memcg->objcg_list);
5231 #ifdef CONFIG_CGROUP_WRITEBACK
5232 INIT_LIST_HEAD(&memcg->cgwb_list);
5233 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5234 memcg->cgwb_frn[i].done =
5235 __WB_COMPLETION_INIT(&memcg_cgwb_frn_waitq);
5237 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5238 spin_lock_init(&memcg->deferred_split_queue.split_queue_lock);
5239 INIT_LIST_HEAD(&memcg->deferred_split_queue.split_queue);
5240 memcg->deferred_split_queue.split_queue_len = 0;
5242 idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
5245 mem_cgroup_id_remove(memcg);
5246 __mem_cgroup_free(memcg);
5247 return ERR_PTR(error);
5250 static struct cgroup_subsys_state * __ref
5251 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5253 struct mem_cgroup *parent = mem_cgroup_from_css(parent_css);
5254 struct mem_cgroup *memcg, *old_memcg;
5255 long error = -ENOMEM;
5257 old_memcg = set_active_memcg(parent);
5258 memcg = mem_cgroup_alloc();
5259 set_active_memcg(old_memcg);
5261 return ERR_CAST(memcg);
5263 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5264 memcg->soft_limit = PAGE_COUNTER_MAX;
5265 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5267 memcg->swappiness = mem_cgroup_swappiness(parent);
5268 memcg->oom_kill_disable = parent->oom_kill_disable;
5270 page_counter_init(&memcg->memory, &parent->memory);
5271 page_counter_init(&memcg->swap, &parent->swap);
5272 page_counter_init(&memcg->kmem, &parent->kmem);
5273 page_counter_init(&memcg->tcpmem, &parent->tcpmem);
5275 page_counter_init(&memcg->memory, NULL);
5276 page_counter_init(&memcg->swap, NULL);
5277 page_counter_init(&memcg->kmem, NULL);
5278 page_counter_init(&memcg->tcpmem, NULL);
5280 root_mem_cgroup = memcg;
5284 /* The following stuff does not apply to the root */
5285 error = memcg_online_kmem(memcg);
5289 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5290 static_branch_inc(&memcg_sockets_enabled_key);
5294 mem_cgroup_id_remove(memcg);
5295 mem_cgroup_free(memcg);
5296 return ERR_PTR(error);
5299 static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
5301 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5304 * A memcg must be visible for expand_shrinker_info()
5305 * by the time the maps are allocated. So, we allocate maps
5306 * here, when for_each_mem_cgroup() can't skip it.
5308 if (alloc_shrinker_info(memcg)) {
5309 mem_cgroup_id_remove(memcg);
5313 /* Online state pins memcg ID, memcg ID pins CSS */
5314 refcount_set(&memcg->id.ref, 1);
5319 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
5321 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5322 struct mem_cgroup_event *event, *tmp;
5325 * Unregister events and notify userspace.
5326 * Notify userspace about cgroup removing only after rmdir of cgroup
5327 * directory to avoid race between userspace and kernelspace.
5329 spin_lock(&memcg->event_list_lock);
5330 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
5331 list_del_init(&event->list);
5332 schedule_work(&event->remove);
5334 spin_unlock(&memcg->event_list_lock);
5336 page_counter_set_min(&memcg->memory, 0);
5337 page_counter_set_low(&memcg->memory, 0);
5339 memcg_offline_kmem(memcg);
5340 reparent_shrinker_deferred(memcg);
5341 wb_memcg_offline(memcg);
5343 drain_all_stock(memcg);
5345 mem_cgroup_id_put(memcg);
5348 static void mem_cgroup_css_released(struct cgroup_subsys_state *css)
5350 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5352 invalidate_reclaim_iterators(memcg);
5355 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
5357 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5358 int __maybe_unused i;
5360 #ifdef CONFIG_CGROUP_WRITEBACK
5361 for (i = 0; i < MEMCG_CGWB_FRN_CNT; i++)
5362 wb_wait_for_completion(&memcg->cgwb_frn[i].done);
5364 if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
5365 static_branch_dec(&memcg_sockets_enabled_key);
5367 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
5368 static_branch_dec(&memcg_sockets_enabled_key);
5370 vmpressure_cleanup(&memcg->vmpressure);
5371 cancel_work_sync(&memcg->high_work);
5372 mem_cgroup_remove_from_trees(memcg);
5373 free_shrinker_info(memcg);
5374 memcg_free_kmem(memcg);
5375 mem_cgroup_free(memcg);
5379 * mem_cgroup_css_reset - reset the states of a mem_cgroup
5380 * @css: the target css
5382 * Reset the states of the mem_cgroup associated with @css. This is
5383 * invoked when the userland requests disabling on the default hierarchy
5384 * but the memcg is pinned through dependency. The memcg should stop
5385 * applying policies and should revert to the vanilla state as it may be
5386 * made visible again.
5388 * The current implementation only resets the essential configurations.
5389 * This needs to be expanded to cover all the visible parts.
5391 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
5393 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5395 page_counter_set_max(&memcg->memory, PAGE_COUNTER_MAX);
5396 page_counter_set_max(&memcg->swap, PAGE_COUNTER_MAX);
5397 page_counter_set_max(&memcg->kmem, PAGE_COUNTER_MAX);
5398 page_counter_set_max(&memcg->tcpmem, PAGE_COUNTER_MAX);
5399 page_counter_set_min(&memcg->memory, 0);
5400 page_counter_set_low(&memcg->memory, 0);
5401 page_counter_set_high(&memcg->memory, PAGE_COUNTER_MAX);
5402 memcg->soft_limit = PAGE_COUNTER_MAX;
5403 page_counter_set_high(&memcg->swap, PAGE_COUNTER_MAX);
5404 memcg_wb_domain_size_changed(memcg);
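/*
 * Worked example of the rstat flush below (numbers are illustrative):
 * if this CPU's statc->state[i] moved from 40 to 70 since the last
 * flush and a child already staged a pending delta of 10, then
 * delta = 10 + 30, the local vmstats.state[i] grows by 40, and the same
 * 40 is staged in the parent's state_pending[i], to be folded in when
 * the parent itself is flushed.
 */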
5407 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
5409 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
5410 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
5411 struct memcg_vmstats_percpu *statc;
5415 statc = per_cpu_ptr(memcg->vmstats_percpu, cpu);
5417 for (i = 0; i < MEMCG_NR_STAT; i++) {
5419 * Collect the aggregated propagation counts of groups
5420 * below us. We're in a per-cpu loop here and this is
5421 * a global counter, so the first cycle will get them.
5423 delta = memcg->vmstats.state_pending[i];
5425 memcg->vmstats.state_pending[i] = 0;
5427 /* Add CPU changes on this level since the last flush */
5428 v = READ_ONCE(statc->state[i]);
5429 if (v != statc->state_prev[i]) {
5430 delta += v - statc->state_prev[i];
5431 statc->state_prev[i] = v;
5437 /* Aggregate counts on this level and propagate upwards */
5438 memcg->vmstats.state[i] += delta;
5440 parent->vmstats.state_pending[i] += delta;
5443 for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
5444 delta = memcg->vmstats.events_pending[i];
5446 memcg->vmstats.events_pending[i] = 0;
5448 v = READ_ONCE(statc->events[i]);
5449 if (v != statc->events_prev[i]) {
5450 delta += v - statc->events_prev[i];
5451 statc->events_prev[i] = v;
5457 memcg->vmstats.events[i] += delta;
5459 parent->vmstats.events_pending[i] += delta;
5464 /* Handlers for move charge at task migration. */
5465 static int mem_cgroup_do_precharge(unsigned long count)
5469 /* Try a single bulk charge without reclaim first, kswapd may wake */
5470 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_DIRECT_RECLAIM, count);
5472 mc.precharge += count;
5476 /* Try charges one by one with reclaim, but do not retry */
5478 ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
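	/*
	 * Worked example of the two-phase precharge above: for a count of
	 * 512 pages, one bulk try_charge() of 512 is attempted without
	 * reclaim; if that fails, we fall back to up to 512 single-page
	 * try_charge() calls that may reclaim but give up early
	 * (__GFP_NORETRY), growing mc.precharge one page at a time.
	 */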
5492 enum mc_target_type {
5499 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5500 unsigned long addr, pte_t ptent)
5502 struct page *page = vm_normal_page(vma, addr, ptent);
5504 if (!page || !page_mapped(page))
5506 if (PageAnon(page)) {
5507 if (!(mc.flags & MOVE_ANON))
5510 if (!(mc.flags & MOVE_FILE))
5513 if (!get_page_unless_zero(page))
5519 #if defined(CONFIG_SWAP) || defined(CONFIG_DEVICE_PRIVATE)
5520 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5521 pte_t ptent, swp_entry_t *entry)
5523 struct page *page = NULL;
5524 swp_entry_t ent = pte_to_swp_entry(ptent);
5526 if (!(mc.flags & MOVE_ANON))
 * Handle MEMORY_DEVICE_PRIVATE, which are ZONE_DEVICE pages belonging to
 * a device; because they are not accessible by the CPU they are stored
 * as special swap entries in the CPU page table.
5534 if (is_device_private_entry(ent)) {
5535 page = device_private_entry_to_page(ent);
 * MEMORY_DEVICE_PRIVATE means ZONE_DEVICE pages, which have
 * a refcount of 1 when free (unlike normal pages)
5540 if (!page_ref_add_unless(page, 1, 1))
5545 if (non_swap_entry(ent))
 * Because lookup_swap_cache() updates some statistics counters,
 * we call find_get_page() with swapper_space directly.
5552 page = find_get_page(swap_address_space(ent), swp_offset(ent));
5553 entry->val = ent.val;
5558 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5559 pte_t ptent, swp_entry_t *entry)
5565 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5566 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5568 if (!vma->vm_file) /* anonymous vma */
5570 if (!(mc.flags & MOVE_FILE))
	/* page is moved even if it's not RSS of this task (page-faulted). */
5574 /* shmem/tmpfs may report page out on swap: account for that too. */
5575 return find_get_incore_page(vma->vm_file->f_mapping,
5576 linear_page_index(vma, addr));
5580 * mem_cgroup_move_account - move account of the page
 * @page: the page
 * @compound: charge the page as compound or small page
5583 * @from: mem_cgroup which the page is moved from.
5584 * @to: mem_cgroup which the page is moved to. @from != @to.
 * The caller must make sure the page is not on the LRU (isolate_page() is
 * useful.)
 *
 * This function doesn't do "charge" to the new cgroup and doesn't do
 * "uncharge" from the old cgroup.
5591 static int mem_cgroup_move_account(struct page *page,
5593 struct mem_cgroup *from,
5594 struct mem_cgroup *to)
5596 struct lruvec *from_vec, *to_vec;
5597 struct pglist_data *pgdat;
5598 unsigned int nr_pages = compound ? thp_nr_pages(page) : 1;
5601 VM_BUG_ON(from == to);
5602 VM_BUG_ON_PAGE(PageLRU(page), page);
5603 VM_BUG_ON(compound && !PageTransHuge(page));
 * Prevent mem_cgroup_migrate() from looking at the source
 * page's memory cgroup while we change it.
5610 if (!trylock_page(page))
5614 if (page_memcg(page) != from)
5617 pgdat = page_pgdat(page);
5618 from_vec = mem_cgroup_lruvec(from, pgdat);
5619 to_vec = mem_cgroup_lruvec(to, pgdat);
5621 lock_page_memcg(page);
5623 if (PageAnon(page)) {
5624 if (page_mapped(page)) {
5625 __mod_lruvec_state(from_vec, NR_ANON_MAPPED, -nr_pages);
5626 __mod_lruvec_state(to_vec, NR_ANON_MAPPED, nr_pages);
5627 if (PageTransHuge(page)) {
5628 __mod_lruvec_state(from_vec, NR_ANON_THPS,
5630 __mod_lruvec_state(to_vec, NR_ANON_THPS,
5635 __mod_lruvec_state(from_vec, NR_FILE_PAGES, -nr_pages);
5636 __mod_lruvec_state(to_vec, NR_FILE_PAGES, nr_pages);
5638 if (PageSwapBacked(page)) {
5639 __mod_lruvec_state(from_vec, NR_SHMEM, -nr_pages);
5640 __mod_lruvec_state(to_vec, NR_SHMEM, nr_pages);
5643 if (page_mapped(page)) {
5644 __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
5645 __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
5648 if (PageDirty(page)) {
5649 struct address_space *mapping = page_mapping(page);
5651 if (mapping_can_writeback(mapping)) {
5652 __mod_lruvec_state(from_vec, NR_FILE_DIRTY,
5654 __mod_lruvec_state(to_vec, NR_FILE_DIRTY,
5660 if (PageWriteback(page)) {
5661 __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
5662 __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
5666 * All state has been migrated, let's switch to the new memcg.
5668 * It is safe to change page's memcg here because the page
5669 * is referenced, charged, isolated, and locked: we can't race
5670 * with (un)charging, migration, LRU putback, or anything else
5671 * that would rely on a stable page's memory cgroup.
5673 * Note that lock_page_memcg is a memcg lock, not a page lock,
5674 * to save space. As soon as we switch page's memory cgroup to a
5675 * new memcg that isn't locked, the above state can change
5676 * concurrently again. Make sure we're truly done with it.
5681 css_put(&from->css);
5683 page->memcg_data = (unsigned long)to;
5685 __unlock_page_memcg(from);
5689 local_irq_disable();
5690 mem_cgroup_charge_statistics(to, page, nr_pages);
5691 memcg_check_events(to, page);
5692 mem_cgroup_charge_statistics(from, page, -nr_pages);
5693 memcg_check_events(from, page);
5702 * get_mctgt_type - get target type of moving charge
5703 * @vma: the vma the pte to be checked belongs
5704 * @addr: the address corresponding to the pte to be checked
5705 * @ptent: the pte to be checked
 * @target: the pointer where the target page or swap entry will be stored
 * (can be NULL)
 * Returns:
 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 * move charge. If @target is not NULL, the page is stored in target->page
 * with an extra refcount taken (callers should handle it).
 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 * target for charge migration. If @target is not NULL, the entry is stored
 * in target->ent.
 * 3(MC_TARGET_DEVICE): like MC_TARGET_PAGE but the page is MEMORY_DEVICE_PRIVATE
 * (so a ZONE_DEVICE page, and thus not on the lru).
 * For now such a page is charged like a regular page would be, as for all
 * intents and purposes it is just special memory taking the place of a
 * regular page.
 *
 * See Documentation/vm/hmm.rst and include/linux/hmm.h
5724 * Called with pte lock held.
5727 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
5728 unsigned long addr, pte_t ptent, union mc_target *target)
5730 struct page *page = NULL;
5731 enum mc_target_type ret = MC_TARGET_NONE;
5732 swp_entry_t ent = { .val = 0 };
5734 if (pte_present(ptent))
5735 page = mc_handle_present_pte(vma, addr, ptent);
5736 else if (is_swap_pte(ptent))
5737 page = mc_handle_swap_pte(vma, ptent, &ent);
5738 else if (pte_none(ptent))
5739 page = mc_handle_file_pte(vma, addr, ptent, &ent);
5741 if (!page && !ent.val)
 * Do only a loose check without serialization.
 * mem_cgroup_move_account() checks whether the page is valid
 * under LRU exclusion.
5749 if (page_memcg(page) == mc.from) {
5750 ret = MC_TARGET_PAGE;
5751 if (is_device_private_page(page))
5752 ret = MC_TARGET_DEVICE;
5754 target->page = page;
5756 if (!ret || !target)
5760 * There is a swap entry and a page doesn't exist or isn't charged.
5761 * But we cannot move a tail-page in a THP.
5763 if (ent.val && !ret && (!page || !PageTransCompound(page)) &&
5764 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
5765 ret = MC_TARGET_SWAP;
5772 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
5774 * We don't consider PMD mapped swapping or file mapped pages because THP does
5775 * not support them for now.
5776 * Caller should make sure that pmd_trans_huge(pmd) is true.
5778 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5779 unsigned long addr, pmd_t pmd, union mc_target *target)
5781 struct page *page = NULL;
5782 enum mc_target_type ret = MC_TARGET_NONE;
5784 if (unlikely(is_swap_pmd(pmd))) {
5785 VM_BUG_ON(thp_migration_supported() &&
5786 !is_pmd_migration_entry(pmd));
5789 page = pmd_page(pmd);
5790 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
5791 if (!(mc.flags & MOVE_ANON))
5793 if (page_memcg(page) == mc.from) {
5794 ret = MC_TARGET_PAGE;
5797 target->page = page;
5803 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
5804 unsigned long addr, pmd_t pmd, union mc_target *target)
5806 return MC_TARGET_NONE;
5810 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
5811 unsigned long addr, unsigned long end,
5812 struct mm_walk *walk)
5814 struct vm_area_struct *vma = walk->vma;
5818 ptl = pmd_trans_huge_lock(pmd, vma);
 * Note there cannot be MC_TARGET_DEVICE for now as we do not
 * support transparent huge pages with MEMORY_DEVICE_PRIVATE but
5823 * this might change.
5825 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
5826 mc.precharge += HPAGE_PMD_NR;
5831 if (pmd_trans_unstable(pmd))
5833 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5834 for (; addr != end; pte++, addr += PAGE_SIZE)
5835 if (get_mctgt_type(vma, addr, *pte, NULL))
5836 mc.precharge++; /* increment precharge temporarily */
5837 pte_unmap_unlock(pte - 1, ptl);
5843 static const struct mm_walk_ops precharge_walk_ops = {
5844 .pmd_entry = mem_cgroup_count_precharge_pte_range,
5847 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
5849 unsigned long precharge;
5852 walk_page_range(mm, 0, mm->highest_vm_end, &precharge_walk_ops, NULL);
5853 mmap_read_unlock(mm);
5855 precharge = mc.precharge;
5861 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
5863 unsigned long precharge = mem_cgroup_count_precharge(mm);
5865 VM_BUG_ON(mc.moving_task);
5866 mc.moving_task = current;
5867 return mem_cgroup_do_precharge(precharge);
5870 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
5871 static void __mem_cgroup_clear_mc(void)
5873 struct mem_cgroup *from = mc.from;
5874 struct mem_cgroup *to = mc.to;
5876 /* we must uncharge all the leftover precharges from mc.to */
5878 cancel_charge(mc.to, mc.precharge);
5882 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
5883 * we must uncharge here.
5885 if (mc.moved_charge) {
5886 cancel_charge(mc.from, mc.moved_charge);
5887 mc.moved_charge = 0;
5889 /* we must fixup refcnts and charges */
5890 if (mc.moved_swap) {
5891 /* uncharge swap account from the old cgroup */
5892 if (!mem_cgroup_is_root(mc.from))
5893 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
5895 mem_cgroup_id_put_many(mc.from, mc.moved_swap);
5898 * we charged both to->memory and to->memsw, so we
5899 * should uncharge to->memory.
5901 if (!mem_cgroup_is_root(mc.to))
5902 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
5906 memcg_oom_recover(from);
5907 memcg_oom_recover(to);
5908 wake_up_all(&mc.waitq);
5911 static void mem_cgroup_clear_mc(void)
5913 struct mm_struct *mm = mc.mm;
 * we must clear moving_task before waking up waiters at the end of
 * task migration.
5919 mc.moving_task = NULL;
5920 __mem_cgroup_clear_mc();
5921 spin_lock(&mc.lock);
5925 spin_unlock(&mc.lock);
5930 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5932 struct cgroup_subsys_state *css;
5933 struct mem_cgroup *memcg = NULL; /* unneeded init to make gcc happy */
5934 struct mem_cgroup *from;
5935 struct task_struct *leader, *p;
5936 struct mm_struct *mm;
5937 unsigned long move_flags;
5940 /* charge immigration isn't supported on the default hierarchy */
5941 if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
5945 * Multi-process migrations only happen on the default hierarchy
5946 * where charge immigration is not used. Perform charge
 * immigration if @tset contains a leader and whine if there are
 * multiple.
5951 cgroup_taskset_for_each_leader(leader, css, tset) {
5954 memcg = mem_cgroup_from_css(css);
5960 * We are now committed to this value whatever it is. Changes in this
5961 * tunable will only affect upcoming migrations, not the current one.
5962 * So we need to save it, and keep it going.
5964 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
5968 from = mem_cgroup_from_task(p);
5970 VM_BUG_ON(from == memcg);
5972 mm = get_task_mm(p);
	/* We move charges only when we move an owner of the mm */
5976 if (mm->owner == p) {
5979 VM_BUG_ON(mc.precharge);
5980 VM_BUG_ON(mc.moved_charge);
5981 VM_BUG_ON(mc.moved_swap);
5983 spin_lock(&mc.lock);
5987 mc.flags = move_flags;
5988 spin_unlock(&mc.lock);
5989 /* We set mc.moving_task later */
5991 ret = mem_cgroup_precharge_mc(mm);
5993 mem_cgroup_clear_mc();
6000 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6003 mem_cgroup_clear_mc();
6006 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
6007 unsigned long addr, unsigned long end,
6008 struct mm_walk *walk)
6011 struct vm_area_struct *vma = walk->vma;
6014 enum mc_target_type target_type;
6015 union mc_target target;
6018 ptl = pmd_trans_huge_lock(pmd, vma);
6020 if (mc.precharge < HPAGE_PMD_NR) {
6024 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
6025 if (target_type == MC_TARGET_PAGE) {
6027 if (!isolate_lru_page(page)) {
6028 if (!mem_cgroup_move_account(page, true,
6030 mc.precharge -= HPAGE_PMD_NR;
6031 mc.moved_charge += HPAGE_PMD_NR;
6033 putback_lru_page(page);
6036 } else if (target_type == MC_TARGET_DEVICE) {
6038 if (!mem_cgroup_move_account(page, true,
6040 mc.precharge -= HPAGE_PMD_NR;
6041 mc.moved_charge += HPAGE_PMD_NR;
6049 if (pmd_trans_unstable(pmd))
6052 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
6053 for (; addr != end; addr += PAGE_SIZE) {
6054 pte_t ptent = *(pte++);
6055 bool device = false;
6061 switch (get_mctgt_type(vma, addr, ptent, &target)) {
6062 case MC_TARGET_DEVICE:
6065 case MC_TARGET_PAGE:
6068 * We can have a part of the split pmd here. Moving it
6069 * can be done but it would be too convoluted so simply
6070 * ignore such a partial THP and keep it in original
6071 * memcg. There should be somebody mapping the head.
6073 if (PageTransCompound(page))
6075 if (!device && isolate_lru_page(page))
6077 if (!mem_cgroup_move_account(page, false,
6080 /* we uncharge from mc.from later. */
6084 putback_lru_page(page);
6085 put: /* get_mctgt_type() gets the page */
6088 case MC_TARGET_SWAP:
6090 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
6092 mem_cgroup_id_get_many(mc.to, 1);
6093 /* we fixup other refcnts and charges later. */
6101 pte_unmap_unlock(pte - 1, ptl);
6106 * We have consumed all precharges we got in can_attach().
 * We try charging one by one, but don't do any additional
 * charges to mc.to if we have failed a charge once already in the
 * attach() phase.
6111 ret = mem_cgroup_do_precharge(1);
6119 static const struct mm_walk_ops charge_walk_ops = {
6120 .pmd_entry = mem_cgroup_move_charge_pte_range,
6123 static void mem_cgroup_move_charge(void)
6125 lru_add_drain_all();
6127 * Signal lock_page_memcg() to take the memcg's move_lock
6128 * while we're moving its pages to another memcg. Then wait
6129 * for already started RCU-only updates to finish.
6131 atomic_inc(&mc.from->moving_account);
6134 if (unlikely(!mmap_read_trylock(mc.mm))) {
 * Someone who is holding the mmap_lock might be waiting on
 * the waitq. So we cancel all extra charges, wake up all waiters,
6138 * and retry. Because we cancel precharges, we might not be able
6139 * to move enough charges, but moving charge is a best-effort
6140 * feature anyway, so it wouldn't be a big problem.
6142 __mem_cgroup_clear_mc();
6147 * When we have consumed all precharges and failed in doing
6148 * additional charge, the page walk just aborts.
6150 walk_page_range(mc.mm, 0, mc.mm->highest_vm_end, &charge_walk_ops,
6153 mmap_read_unlock(mc.mm);
6154 atomic_dec(&mc.from->moving_account);
6157 static void mem_cgroup_move_task(void)
6160 mem_cgroup_move_charge();
6161 mem_cgroup_clear_mc();
6164 #else /* !CONFIG_MMU */
6165 static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
6169 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
6172 static void mem_cgroup_move_task(void)
6177 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
6179 if (value == PAGE_COUNTER_MAX)
6180 seq_puts(m, "max\n");
6182 seq_printf(m, "%llu\n", (u64)value * PAGE_SIZE);
6187 static u64 memory_current_read(struct cgroup_subsys_state *css,
6190 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
6192 return (u64)page_counter_read(&memcg->memory) * PAGE_SIZE;
6195 static int memory_min_show(struct seq_file *m, void *v)
6197 return seq_puts_memcg_tunable(m,
6198 READ_ONCE(mem_cgroup_from_seq(m)->memory.min));
6201 static ssize_t memory_min_write(struct kernfs_open_file *of,
6202 char *buf, size_t nbytes, loff_t off)
6204 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6208 buf = strstrip(buf);
6209 err = page_counter_memparse(buf, "max", &min);
6213 page_counter_set_min(&memcg->memory, min);
6218 static int memory_low_show(struct seq_file *m, void *v)
6220 return seq_puts_memcg_tunable(m,
6221 READ_ONCE(mem_cgroup_from_seq(m)->memory.low));
6224 static ssize_t memory_low_write(struct kernfs_open_file *of,
6225 char *buf, size_t nbytes, loff_t off)
6227 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6231 buf = strstrip(buf);
6232 err = page_counter_memparse(buf, "max", &low);
6236 page_counter_set_low(&memcg->memory, low);
6241 static int memory_high_show(struct seq_file *m, void *v)
6243 return seq_puts_memcg_tunable(m,
6244 READ_ONCE(mem_cgroup_from_seq(m)->memory.high));
6247 static ssize_t memory_high_write(struct kernfs_open_file *of,
6248 char *buf, size_t nbytes, loff_t off)
6250 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6251 unsigned int nr_retries = MAX_RECLAIM_RETRIES;
6252 bool drained = false;
6256 buf = strstrip(buf);
6257 err = page_counter_memparse(buf, "max", &high);
6261 page_counter_set_high(&memcg->memory, high);
6264 unsigned long nr_pages = page_counter_read(&memcg->memory);
6265 unsigned long reclaimed;
6267 if (nr_pages <= high)
6270 if (signal_pending(current))
6274 drain_all_stock(memcg);
6279 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6282 if (!reclaimed && !nr_retries--)
6286 memcg_wb_domain_size_changed(memcg);
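/*
 * Illustrative userspace view of the write path above (the cgroup2
 * paths are assumptions for the example):
 *
 *	int fd = open("/sys/fs/cgroup/grp/memory.high", O_WRONLY);
 *
 *	write(fd, "64M", 3);	publish the limit, then reclaim toward it
 *	write(fd, "max", 3);	clear the limit (PAGE_COUNTER_MAX)
 *
 * The write only returns once usage fits under the new high, the
 * reclaim retries are exhausted, or a signal is pending.
 */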
6290 static int memory_max_show(struct seq_file *m, void *v)
6292 return seq_puts_memcg_tunable(m,
6293 READ_ONCE(mem_cgroup_from_seq(m)->memory.max));
6296 static ssize_t memory_max_write(struct kernfs_open_file *of,
6297 char *buf, size_t nbytes, loff_t off)
6299 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6300 unsigned int nr_reclaims = MAX_RECLAIM_RETRIES;
6301 bool drained = false;
6305 buf = strstrip(buf);
6306 err = page_counter_memparse(buf, "max", &max);
6310 xchg(&memcg->memory.max, max);
6313 unsigned long nr_pages = page_counter_read(&memcg->memory);
6315 if (nr_pages <= max)
6318 if (signal_pending(current))
6322 drain_all_stock(memcg);
6328 if (!try_to_free_mem_cgroup_pages(memcg, nr_pages - max,
6334 memcg_memory_event(memcg, MEMCG_OOM);
6335 if (!mem_cgroup_out_of_memory(memcg, GFP_KERNEL, 0))
6339 memcg_wb_domain_size_changed(memcg);
6343 static void __memory_events_show(struct seq_file *m, atomic_long_t *events)
6345 seq_printf(m, "low %lu\n", atomic_long_read(&events[MEMCG_LOW]));
6346 seq_printf(m, "high %lu\n", atomic_long_read(&events[MEMCG_HIGH]));
6347 seq_printf(m, "max %lu\n", atomic_long_read(&events[MEMCG_MAX]));
6348 seq_printf(m, "oom %lu\n", atomic_long_read(&events[MEMCG_OOM]));
6349 seq_printf(m, "oom_kill %lu\n",
6350 atomic_long_read(&events[MEMCG_OOM_KILL]));
6353 static int memory_events_show(struct seq_file *m, void *v)
6355 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6357 __memory_events_show(m, memcg->memory_events);
6361 static int memory_events_local_show(struct seq_file *m, void *v)
6363 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6365 __memory_events_show(m, memcg->memory_events_local);
6369 static int memory_stat_show(struct seq_file *m, void *v)
6371 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6374 buf = memory_stat_format(memcg);
6383 static inline unsigned long lruvec_page_state_output(struct lruvec *lruvec,
6386 return lruvec_page_state(lruvec, item) * memcg_page_state_unit(item);
6389 static int memory_numa_stat_show(struct seq_file *m, void *v)
6392 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6394 for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
6397 if (memory_stats[i].idx >= NR_VM_NODE_STAT_ITEMS)
6400 seq_printf(m, "%s", memory_stats[i].name);
6401 for_each_node_state(nid, N_MEMORY) {
6403 struct lruvec *lruvec;
6405 lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
6406 size = lruvec_page_state_output(lruvec,
6407 memory_stats[i].idx);
6408 seq_printf(m, " N%d=%llu", nid, size);
6417 static int memory_oom_group_show(struct seq_file *m, void *v)
6419 struct mem_cgroup *memcg = mem_cgroup_from_seq(m);
6421 seq_printf(m, "%d\n", memcg->oom_group);
6426 static ssize_t memory_oom_group_write(struct kernfs_open_file *of,
6427 char *buf, size_t nbytes, loff_t off)
6429 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
6432 buf = strstrip(buf);
6436 ret = kstrtoint(buf, 0, &oom_group);
6440 if (oom_group != 0 && oom_group != 1)
6443 memcg->oom_group = oom_group;
6448 static struct cftype memory_files[] = {
6451 .flags = CFTYPE_NOT_ON_ROOT,
6452 .read_u64 = memory_current_read,
6456 .flags = CFTYPE_NOT_ON_ROOT,
6457 .seq_show = memory_min_show,
6458 .write = memory_min_write,
6462 .flags = CFTYPE_NOT_ON_ROOT,
6463 .seq_show = memory_low_show,
6464 .write = memory_low_write,
6468 .flags = CFTYPE_NOT_ON_ROOT,
6469 .seq_show = memory_high_show,
6470 .write = memory_high_write,
6474 .flags = CFTYPE_NOT_ON_ROOT,
6475 .seq_show = memory_max_show,
6476 .write = memory_max_write,
6480 .flags = CFTYPE_NOT_ON_ROOT,
6481 .file_offset = offsetof(struct mem_cgroup, events_file),
6482 .seq_show = memory_events_show,
6485 .name = "events.local",
6486 .flags = CFTYPE_NOT_ON_ROOT,
6487 .file_offset = offsetof(struct mem_cgroup, events_local_file),
6488 .seq_show = memory_events_local_show,
6492 .seq_show = memory_stat_show,
6496 .name = "numa_stat",
6497 .seq_show = memory_numa_stat_show,
6501 .name = "oom.group",
6502 .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_NS_DELEGATABLE,
6503 .seq_show = memory_oom_group_show,
6504 .write = memory_oom_group_write,
6509 struct cgroup_subsys memory_cgrp_subsys = {
6510 .css_alloc = mem_cgroup_css_alloc,
6511 .css_online = mem_cgroup_css_online,
6512 .css_offline = mem_cgroup_css_offline,
6513 .css_released = mem_cgroup_css_released,
6514 .css_free = mem_cgroup_css_free,
6515 .css_reset = mem_cgroup_css_reset,
6516 .css_rstat_flush = mem_cgroup_css_rstat_flush,
6517 .can_attach = mem_cgroup_can_attach,
6518 .cancel_attach = mem_cgroup_cancel_attach,
6519 .post_attach = mem_cgroup_move_task,
6520 .dfl_cftypes = memory_files,
6521 .legacy_cftypes = mem_cgroup_legacy_files,
6526 * This function calculates an individual cgroup's effective
6527 * protection which is derived from its own memory.min/low, its
6528 * parent's and siblings' settings, as well as the actual memory
6529 * distribution in the tree.
6531 * The following rules apply to the effective protection values:
6533 * 1. At the first level of reclaim, effective protection is equal to
6534 * the declared protection in memory.min and memory.low.
6536 * 2. To enable safe delegation of the protection configuration, at
6537 * subsequent levels the effective protection is capped to the
6538 * parent's effective protection.
6540 * 3. To make complex and dynamic subtrees easier to configure, the
6541 * user is allowed to overcommit the declared protection at a given
6542 * level. If that is the case, the parent's effective protection is
6543 * distributed to the children in proportion to how much protection
6544 * they have declared and how much of it they are utilizing.
6546 * This makes distribution proportional, but also work-conserving:
6547 * if one cgroup claims much more protection than it uses memory,
6548 * the unused remainder is available to its siblings.
6550 * 4. Conversely, when the declared protection is undercommitted at a
6551 * given level, the distribution of the larger parental protection
6552 * budget is NOT proportional. A cgroup's protection from a sibling
6553 * is capped to its own memory.min/low setting.
6555 * 5. However, to allow protecting recursive subtrees from each other
6556 * without having to declare each individual cgroup's fixed share
6557 * of the ancestor's claim to protection, any unutilized -
6558 * "floating" - protection from up the tree is distributed in
6559 * proportion to each cgroup's *usage*. This makes the protection
6560 * neutral wrt sibling cgroups and lets them compete freely over
6561 * the shared parental protection budget, but it protects the
6562 * subtree as a whole from neighboring subtrees.
6564 * Note that 4. and 5. are not in conflict: 4. is about protecting
6565 * against immediate siblings whereas 5. is about protecting against
6566 * neighboring subtrees.
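 *
 * A worked example of rule 3 (all numbers illustrative): a parent whose
 * effective low protection is 10G has two children declaring low=8G and
 * low=6G while using 6G each. Each child's utilized claim is
 * min(usage, setting) = 6G, so siblings_protected = 12G exceeds the
 * parent's 10G budget and each child ends up with 6G * 10G / 12G = 5G
 * of effective protection.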
6568 static unsigned long effective_protection(unsigned long usage,
6569 unsigned long parent_usage,
6570 unsigned long setting,
6571 unsigned long parent_effective,
6572 unsigned long siblings_protected)
6574 unsigned long protected;
6577 protected = min(usage, setting);
6579 * If all cgroups at this level combined claim and use more
 * protection than what the parent affords them, distribute
6581 * shares in proportion to utilization.
6583 * We are using actual utilization rather than the statically
6584 * claimed protection in order to be work-conserving: claimed
6585 * but unused protection is available to siblings that would
6586 * otherwise get a smaller chunk than what they claimed.
6588 if (siblings_protected > parent_effective)
6589 return protected * parent_effective / siblings_protected;
6592 * Ok, utilized protection of all children is within what the
6593 * parent affords them, so we know whatever this child claims
6594 * and utilizes is effectively protected.
6596 * If there is unprotected usage beyond this value, reclaim
6597 * will apply pressure in proportion to that amount.
6599 * If there is unutilized protection, the cgroup will be fully
6600 * shielded from reclaim, but we do return a smaller value for
6601 * protection than what the group could enjoy in theory. This
6602 * is okay. With the overcommit distribution above, effective
6603 * protection is always dependent on how memory is actually
6604 * consumed among the siblings anyway.
6609 * If the children aren't claiming (all of) the protection
6610 * afforded to them by the parent, distribute the remainder in
6611 * proportion to the (unprotected) memory of each cgroup. That
6612 * way, cgroups that aren't explicitly prioritized wrt each
6613 * other compete freely over the allowance, but they are
6614 * collectively protected from neighboring trees.
6616 * We're using unprotected memory for the weight so that if
6617 * some cgroups DO claim explicit protection, we don't protect
6618 * the same bytes twice.
6620 * Check both usage and parent_usage against the respective
6621 * protected values. One should imply the other, but they
6622 * aren't read atomically - make sure the division is sane.
6624 if (!(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_RECURSIVE_PROT))
6626 if (parent_effective > siblings_protected &&
6627 parent_usage > siblings_protected &&
6628 usage > protected) {
6629 unsigned long unclaimed;
6631 unclaimed = parent_effective - siblings_protected;
6632 unclaimed *= usage - protected;
6633 unclaimed /= parent_usage - siblings_protected;
 * mem_cgroup_calculate_protection - calculate a memcg's effective protection
6643 * @root: the top ancestor of the sub-tree being checked
6644 * @memcg: the memory cgroup to check
6646 * WARNING: This function is not stateless! It can only be used as part
6647 * of a top-down tree iteration, not for isolated queries.
6649 void mem_cgroup_calculate_protection(struct mem_cgroup *root,
6650 struct mem_cgroup *memcg)
6652 unsigned long usage, parent_usage;
6653 struct mem_cgroup *parent;
6655 if (mem_cgroup_disabled())
6659 root = root_mem_cgroup;
6662 * Effective values of the reclaim targets are ignored so they
 * can be stale. Have a look at mem_cgroup_protection for more
 * details.
6665 * TODO: calculation should be more robust so that we do not need
6666 * that special casing.
6671 usage = page_counter_read(&memcg->memory);
6675 parent = parent_mem_cgroup(memcg);
6676 /* No parent means a non-hierarchical mode on v1 memcg */
6680 if (parent == root) {
6681 memcg->memory.emin = READ_ONCE(memcg->memory.min);
6682 memcg->memory.elow = READ_ONCE(memcg->memory.low);
6686 parent_usage = page_counter_read(&parent->memory);
6688 WRITE_ONCE(memcg->memory.emin, effective_protection(usage, parent_usage,
6689 READ_ONCE(memcg->memory.min),
6690 READ_ONCE(parent->memory.emin),
6691 atomic_long_read(&parent->memory.children_min_usage)));
6693 WRITE_ONCE(memcg->memory.elow, effective_protection(usage, parent_usage,
6694 READ_ONCE(memcg->memory.low),
6695 READ_ONCE(parent->memory.elow),
6696 atomic_long_read(&parent->memory.children_low_usage)));
6699 static int __mem_cgroup_charge(struct page *page, struct mem_cgroup *memcg,
6702 unsigned int nr_pages = thp_nr_pages(page);
6705 ret = try_charge(memcg, gfp, nr_pages);
6709 css_get(&memcg->css);
6710 commit_charge(page, memcg);
6712 local_irq_disable();
6713 mem_cgroup_charge_statistics(memcg, page, nr_pages);
6714 memcg_check_events(memcg, page);
6721 * mem_cgroup_charge - charge a newly allocated page to a cgroup
6722 * @page: page to charge
6723 * @mm: mm context of the victim
6724 * @gfp_mask: reclaim mode
6726 * Try to charge @page to the memcg that @mm belongs to, reclaiming
 * pages according to @gfp_mask if necessary. If @mm is NULL, try to
6728 * charge to the active memcg.
6730 * Do not use this for pages allocated for swapin.
6732 * Returns 0 on success. Otherwise, an error code is returned.
6734 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
6736 struct mem_cgroup *memcg;
6739 if (mem_cgroup_disabled())
6742 memcg = get_mem_cgroup_from_mm(mm);
6743 ret = __mem_cgroup_charge(page, memcg, gfp_mask);
6744 css_put(&memcg->css);
6750 * mem_cgroup_swapin_charge_page - charge a newly allocated page for swapin
6751 * @page: page to charge
6752 * @mm: mm context of the victim
6753 * @gfp: reclaim mode
6754 * @entry: swap entry for which the page is allocated
6756 * This function charges a page allocated for swapin. Please call this before
6757 * adding the page to the swapcache.
6759 * Returns 0 on success. Otherwise, an error code is returned.
6761 int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm,
6762 gfp_t gfp, swp_entry_t entry)
6764 struct mem_cgroup *memcg;
6768 if (mem_cgroup_disabled())
6771 id = lookup_swap_cgroup_id(entry);
6773 memcg = mem_cgroup_from_id(id);
6774 if (!memcg || !css_tryget_online(&memcg->css))
6775 memcg = get_mem_cgroup_from_mm(mm);
6778 ret = __mem_cgroup_charge(page, memcg, gfp);
6780 css_put(&memcg->css);
6785 * mem_cgroup_swapin_uncharge_swap - uncharge swap slot
6786 * @entry: swap entry for which the page is charged
6788 * Call this function after successfully adding the charged page to swapcache.
 * Note: This function assumes the page for which the swap slot is being
 * uncharged is an order-0 page.
6793 void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry)
6796 * Cgroup1's unified memory+swap counter has been charged with the
6797 * new swapcache page, finish the transfer by uncharging the swap
6798 * slot. The swap slot would also get uncharged when it dies, but
 * it can stick around indefinitely and we'd count the page twice
 * the next time.
6802 * Cgroup2 has separate resource counters for memory and swap,
6803 * so this is a non-issue here. Memory and swap charge lifetimes
6804 * correspond 1:1 to page and swap slot lifetimes: we charge the
6805 * page to memory here, and uncharge swap when the slot is freed.
6807 if (!mem_cgroup_disabled() && do_memsw_account()) {
6809 * The swap entry might not get freed for a long time,
6810 * let's not wait for it. The page already received a
6811 * memory+swap charge, drop the swap entry duplicate.
6813 mem_cgroup_uncharge_swap(entry, 1);
6817 struct uncharge_gather {
6818 struct mem_cgroup *memcg;
6819 unsigned long nr_memory;
6820 unsigned long pgpgout;
6821 unsigned long nr_kmem;
6822 struct page *dummy_page;
6825 static inline void uncharge_gather_clear(struct uncharge_gather *ug)
6827 memset(ug, 0, sizeof(*ug));
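/*
 * The gather/batch pattern these helpers support (this is how
 * mem_cgroup_uncharge_list() below drives them): consecutive pages
 * belonging to the same memcg are folded into a single counter update.
 *
 *	struct uncharge_gather ug;
 *
 *	uncharge_gather_clear(&ug);
 *	list_for_each_entry(page, page_list, lru)
 *		uncharge_page(page, &ug);
 *	uncharge_batch(&ug);
 */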
6830 static void uncharge_batch(const struct uncharge_gather *ug)
6832 unsigned long flags;
6834 if (ug->nr_memory) {
6835 page_counter_uncharge(&ug->memcg->memory, ug->nr_memory);
6836 if (do_memsw_account())
6837 page_counter_uncharge(&ug->memcg->memsw, ug->nr_memory);
6838 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && ug->nr_kmem)
6839 page_counter_uncharge(&ug->memcg->kmem, ug->nr_kmem);
6840 memcg_oom_recover(ug->memcg);
6843 local_irq_save(flags);
6844 __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
6845 __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
6846 memcg_check_events(ug->memcg, ug->dummy_page);
6847 local_irq_restore(flags);
6849 /* drop reference from uncharge_page */
6850 css_put(&ug->memcg->css);
6853 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
6855 unsigned long nr_pages;
6856 struct mem_cgroup *memcg;
6857 struct obj_cgroup *objcg;
6858 bool use_objcg = PageMemcgKmem(page);
6860 VM_BUG_ON_PAGE(PageLRU(page), page);
6863 * Nobody should be changing or seriously looking at
6864 * page memcg or objcg at this point, we have fully
6865 * exclusive access to the page.
6868 objcg = __page_objcg(page);
6870 * This get matches the put at the end of the function and
6871 * kmem pages do not hold memcg references anymore.
6873 memcg = get_mem_cgroup_from_objcg(objcg);
6875 memcg = __page_memcg(page);
6881 if (ug->memcg != memcg) {
6884 uncharge_gather_clear(ug);
6887 ug->dummy_page = page;
6889 /* pairs with css_put in uncharge_batch */
6890 css_get(&memcg->css);
6893 nr_pages = compound_nr(page);
6896 ug->nr_memory += nr_pages;
6897 ug->nr_kmem += nr_pages;
6899 page->memcg_data = 0;
6900 obj_cgroup_put(objcg);
6902 /* LRU pages aren't accounted at the root level */
6903 if (!mem_cgroup_is_root(memcg))
6904 ug->nr_memory += nr_pages;
6907 page->memcg_data = 0;
6910 css_put(&memcg->css);
6914 * mem_cgroup_uncharge - uncharge a page
6915 * @page: page to uncharge
6917 * Uncharge a page previously charged with mem_cgroup_charge().
6919 void mem_cgroup_uncharge(struct page *page)
6921 struct uncharge_gather ug;
6923 if (mem_cgroup_disabled())
6926 /* Don't touch page->lru of any random page, pre-check: */
6927 if (!page_memcg(page))
6930 uncharge_gather_clear(&ug);
6931 uncharge_page(page, &ug);
6932 uncharge_batch(&ug);
 * mem_cgroup_uncharge_list - uncharge a list of pages
6937 * @page_list: list of pages to uncharge
6939 * Uncharge a list of pages previously charged with
6940 * mem_cgroup_charge().
6942 void mem_cgroup_uncharge_list(struct list_head *page_list)
6944 struct uncharge_gather ug;
6947 if (mem_cgroup_disabled())
6950 uncharge_gather_clear(&ug);
6951 list_for_each_entry(page, page_list, lru)
6952 uncharge_page(page, &ug);
6954 uncharge_batch(&ug);
/**
 * mem_cgroup_migrate - charge a page's replacement
 * @oldpage: currently circulating page
 * @newpage: replacement page
 *
 * Charge @newpage as a replacement page for @oldpage. @oldpage will
 * be uncharged upon free.
 *
 * Both pages must be locked, @newpage->mapping must be set up.
 */
void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
{
	struct mem_cgroup *memcg;
	unsigned int nr_pages;
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
	VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
	VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
		       newpage);

	if (mem_cgroup_disabled())
		return;

	/* Page cache replacement: new page already charged? */
	if (page_memcg(newpage))
		return;

	memcg = page_memcg(oldpage);
	VM_WARN_ON_ONCE_PAGE(!memcg, oldpage);
	if (!memcg)
		return;

	/* Force-charge the new page. The old one will be freed soon */
	nr_pages = thp_nr_pages(newpage);

	if (!mem_cgroup_is_root(memcg)) {
		page_counter_charge(&memcg->memory, nr_pages);
		if (do_memsw_account())
			page_counter_charge(&memcg->memsw, nr_pages);
	}

	css_get(&memcg->css);
	commit_charge(newpage, memcg);

	local_irq_save(flags);
	mem_cgroup_charge_statistics(memcg, newpage, nr_pages);
	memcg_check_events(memcg, newpage);
	local_irq_restore(flags);
}
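
/*
 * Illustrative note, not part of the original file: a replacement path
 * (e.g. page-cache replacement) is expected to lock both pages, set up
 * newpage->mapping, call mem_cgroup_migrate(oldpage, newpage), and then
 * simply release @oldpage -- its charge is dropped through the normal
 * uncharge path when the old page dies.
 */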
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
EXPORT_SYMBOL(memcg_sockets_enabled_key);
void mem_cgroup_sk_alloc(struct sock *sk)
{
	struct mem_cgroup *memcg;

	if (!mem_cgroup_sockets_enabled)
		return;

	/* Do not associate the sock with unrelated interrupted task's memcg. */
	if (in_interrupt())
		return;

	rcu_read_lock();
	memcg = mem_cgroup_from_task(current);
	if (memcg == root_mem_cgroup)
		goto out;
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
		goto out;
	if (css_tryget(&memcg->css))
		sk->sk_memcg = memcg;
out:
	rcu_read_unlock();
}
void mem_cgroup_sk_free(struct sock *sk)
{
	if (sk->sk_memcg)
		css_put(&sk->sk_memcg->css);
}
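
/*
 * Illustrative note, not part of the original file: these two helpers
 * bracket a socket's lifetime. mem_cgroup_sk_alloc() associates a new
 * socket with the creating task's memcg, pinning it with the css
 * reference taken via css_tryget() above, and mem_cgroup_sk_free()
 * drops that reference when the socket is destroyed.
 */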
/**
 * mem_cgroup_charge_skmem - charge socket memory
 * @memcg: memcg to charge
 * @nr_pages: number of pages to charge
 *
 * Charges @nr_pages to @memcg. Returns %true if the charge fit within
 * @memcg's configured limit, %false if the charge had to be forced.
 */
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	gfp_t gfp_mask = GFP_KERNEL;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		struct page_counter *fail;

		if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
			memcg->tcpmem_pressure = 0;
			return true;
		}
		page_counter_charge(&memcg->tcpmem, nr_pages);
		memcg->tcpmem_pressure = 1;
		return false;
	}

	/* Don't block in the packet receive path */
	if (in_softirq())
		gfp_mask = GFP_NOWAIT;

	mod_memcg_state(memcg, MEMCG_SOCK, nr_pages);

	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
		return true;

	try_charge(memcg, gfp_mask|__GFP_NOFAIL, nr_pages);
	return false;
}
/**
 * mem_cgroup_uncharge_skmem - uncharge socket memory
 * @memcg: memcg to uncharge
 * @nr_pages: number of pages to uncharge
 */
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
		page_counter_uncharge(&memcg->tcpmem, nr_pages);
		return;
	}

	mod_memcg_state(memcg, MEMCG_SOCK, -nr_pages);

	refill_stock(memcg, nr_pages);
}
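
/*
 * Illustrative sketch, not part of the original file: how a networking
 * path might pair the two helpers above. example_sk_buffer_account() is
 * invented here; sk->sk_memcg is the association set up by
 * mem_cgroup_sk_alloc() earlier in this file.
 */
static void __maybe_unused example_sk_buffer_account(struct sock *sk,
						     unsigned int nr_pages)
{
	if (!sk->sk_memcg)
		return;

	/* A forced charge succeeds too, but signals memcg pressure. */
	if (!mem_cgroup_charge_skmem(sk->sk_memcg, nr_pages))
		pr_debug("socket memory charge was forced\n");

	/* ...buffer in use... */

	mem_cgroup_uncharge_skmem(sk->sk_memcg, nr_pages);
}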
static int __init cgroup_memory(char *s)
{
	char *token;

	while ((token = strsep(&s, ",")) != NULL) {
		if (!*token)
			continue;
		if (!strcmp(token, "nosocket"))
			cgroup_memory_nosocket = true;
		if (!strcmp(token, "nokmem"))
			cgroup_memory_nokmem = true;
	}
	return 0;
}
__setup("cgroup.memory=", cgroup_memory);
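
/*
 * Usage example (illustrative, not part of the original file): booting
 * with "cgroup.memory=nosocket,nokmem" on the kernel command line
 * disables both socket memory accounting and kernel memory accounting.
 */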
/*
 * subsys_initcall() for memory controller.
 *
 * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
 * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
 * basically everything that doesn't depend on a specific mem_cgroup structure
 * should be initialized from here.
 */
static int __init mem_cgroup_init(void)
{
	int cpu, node;

	/*
	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
	 * used for per-memcg-per-cpu caching of per-node statistics. In order
	 * to work fine, we should make sure that the overfill threshold can't
	 * exceed S32_MAX / PAGE_SIZE.
	 */
	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);

	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCG_DEAD, "mm/memctrl:dead", NULL,
				  memcg_hotplug_cpu_dead);

	for_each_possible_cpu(cpu)
		INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
			  drain_local_stock);

	for_each_node(node) {
		struct mem_cgroup_tree_per_node *rtpn;

		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
				    node_online(node) ? node : NUMA_NO_NODE);

		rtpn->rb_root = RB_ROOT;
		rtpn->rb_rightmost = NULL;
		spin_lock_init(&rtpn->lock);
		soft_limit_tree.rb_tree_per_node[node] = rtpn;
	}

	return 0;
}
subsys_initcall(mem_cgroup_init);
#ifdef CONFIG_MEMCG_SWAP
static struct mem_cgroup *mem_cgroup_id_get_online(struct mem_cgroup *memcg)
{
	while (!refcount_inc_not_zero(&memcg->id.ref)) {
		/*
		 * The root cgroup cannot be destroyed, so its refcount must
		 * always be >= 1.
		 */
		if (WARN_ON_ONCE(memcg == root_mem_cgroup)) {
			VM_BUG_ON(1);
			break;
		}
		memcg = parent_mem_cgroup(memcg);
		if (!memcg)
			memcg = root_mem_cgroup;
	}
	return memcg;
}
/**
 * mem_cgroup_swapout - transfer a memsw charge to swap
 * @page: page whose memsw charge to transfer
 * @entry: swap entry to move the charge to
 *
 * Transfer the memsw charge of @page to @entry.
 */
void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
{
	struct mem_cgroup *memcg, *swap_memcg;
	unsigned int nr_entries;
	unsigned short oldid;

	VM_BUG_ON_PAGE(PageLRU(page), page);
	VM_BUG_ON_PAGE(page_count(page), page);

	if (mem_cgroup_disabled())
		return;

	if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return;

	memcg = page_memcg(page);

	VM_WARN_ON_ONCE_PAGE(!memcg, page);
	if (!memcg)
		return;

	/*
	 * In case the memcg owning these pages has been offlined and doesn't
	 * have an ID allocated to it anymore, charge the closest online
	 * ancestor for the swap instead and transfer the memory+swap charge.
	 */
	swap_memcg = mem_cgroup_id_get_online(memcg);
	nr_entries = thp_nr_pages(page);
	/* Get references for the tail pages, too */
	if (nr_entries > 1)
		mem_cgroup_id_get_many(swap_memcg, nr_entries - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg),
				   nr_entries);
	VM_BUG_ON_PAGE(oldid, page);
	mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries);

	page->memcg_data = 0;

	if (!mem_cgroup_is_root(memcg))
		page_counter_uncharge(&memcg->memory, nr_entries);

	if (!cgroup_memory_noswap && memcg != swap_memcg) {
		if (!mem_cgroup_is_root(swap_memcg))
			page_counter_charge(&swap_memcg->memsw, nr_entries);
		page_counter_uncharge(&memcg->memsw, nr_entries);
	}

	/*
	 * Interrupts should be disabled here because the caller holds the
	 * i_pages lock which is taken with interrupts-off. It is
	 * important here to have the interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
	 */
	VM_BUG_ON(!irqs_disabled());
	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
	memcg_check_events(memcg, page);

	css_put(&memcg->css);
}
/**
 * mem_cgroup_try_charge_swap - try charging swap space for a page
 * @page: page being added to swap
 * @entry: swap entry to charge
 *
 * Try to charge @page's memcg for the swap space at @entry.
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry)
{
	unsigned int nr_pages = thp_nr_pages(page);
	struct page_counter *counter;
	struct mem_cgroup *memcg;
	unsigned short oldid;

	if (mem_cgroup_disabled())
		return 0;

	if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return 0;

	memcg = page_memcg(page);

	VM_WARN_ON_ONCE_PAGE(!memcg, page);
	if (!memcg)
		return 0;

	if (!entry.val) {
		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
		return 0;
	}

	memcg = mem_cgroup_id_get_online(memcg);

	if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg) &&
	    !page_counter_try_charge(&memcg->swap, nr_pages, &counter)) {
		memcg_memory_event(memcg, MEMCG_SWAP_MAX);
		memcg_memory_event(memcg, MEMCG_SWAP_FAIL);
		mem_cgroup_id_put(memcg);
		return -ENOMEM;
	}

	/* Get references for the tail pages, too */
	if (nr_pages > 1)
		mem_cgroup_id_get_many(memcg, nr_pages - 1);
	oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages);
	VM_BUG_ON_PAGE(oldid, page);
	mod_memcg_state(memcg, MEMCG_SWAP, nr_pages);

	return 0;
}
/**
 * mem_cgroup_uncharge_swap - uncharge swap space
 * @entry: swap entry to uncharge
 * @nr_pages: the amount of swap space to uncharge
 */
void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	id = swap_cgroup_record(entry, 0, nr_pages);
	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg) {
		if (!cgroup_memory_noswap && !mem_cgroup_is_root(memcg)) {
			if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
				page_counter_uncharge(&memcg->swap, nr_pages);
			else
				page_counter_uncharge(&memcg->memsw, nr_pages);
		}
		mod_memcg_state(memcg, MEMCG_SWAP, -nr_pages);
		mem_cgroup_id_put_many(memcg, nr_pages);
	}
	rcu_read_unlock();
}
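
/*
 * Illustrative note, not part of the original file: the functions above
 * cover the whole swap-charge lifecycle. Roughly:
 * mem_cgroup_try_charge_swap() when a page is allocated a swap slot,
 * mem_cgroup_swapout() (cgroup1 memsw only) when the page itself leaves
 * the swapcache, and mem_cgroup_uncharge_swap() when the swap slot is
 * finally freed, which also drops the memcg ID references taken at
 * charge time.
 */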
long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
{
	long nr_swap_pages = get_nr_swap_pages();

	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return nr_swap_pages;
	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg))
		nr_swap_pages = min_t(long, nr_swap_pages,
				      READ_ONCE(memcg->swap.max) -
				      page_counter_read(&memcg->swap));
	return nr_swap_pages;
}
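
/*
 * Worked example (illustrative, not part of the original file): with
 * 1000 free swap pages globally and one ancestor whose swap.max is 400
 * with 150 pages already charged, the walk above yields
 * min(1000, 400 - 150) == 250 pages available to this memcg.
 */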
bool mem_cgroup_swap_full(struct page *page)
{
	struct mem_cgroup *memcg;

	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (vm_swap_full())
		return true;
	if (cgroup_memory_noswap || !cgroup_subsys_on_dfl(memory_cgrp_subsys))
		return false;

	memcg = page_memcg(page);
	if (!memcg)
		return false;

	for (; memcg != root_mem_cgroup; memcg = parent_mem_cgroup(memcg)) {
		unsigned long usage = page_counter_read(&memcg->swap);

		if (usage * 2 >= READ_ONCE(memcg->swap.high) ||
		    usage * 2 >= READ_ONCE(memcg->swap.max))
			return true;
	}

	return false;
}
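
/*
 * Worked example (illustrative, not part of the original file): with
 * swap.max set to 1024 pages and 512 pages charged, 512 * 2 >= 1024
 * holds, so the hierarchy is already considered swap-full at half of
 * its configured limit.
 */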
static int __init setup_swap_account(char *s)
{
	if (!strcmp(s, "1"))
		cgroup_memory_noswap = false;
	else if (!strcmp(s, "0"))
		cgroup_memory_noswap = true;
	return 1;
}
__setup("swapaccount=", setup_swap_account);
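
/*
 * Usage example (illustrative, not part of the original file): booting
 * with "swapaccount=0" disables the swap controller, while
 * "swapaccount=1" keeps it active (the default when CONFIG_MEMCG_SWAP
 * is enabled).
 */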
static u64 swap_current_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE;
}
static int swap_high_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->swap.high));
}
static ssize_t swap_high_write(struct kernfs_open_file *of,
			       char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long high;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &high);
	if (err)
		return err;

	page_counter_set_high(&memcg->swap, high);

	return nbytes;
}
static int swap_max_show(struct seq_file *m, void *v)
{
	return seq_puts_memcg_tunable(m,
		READ_ONCE(mem_cgroup_from_seq(m)->swap.max));
}
static ssize_t swap_max_write(struct kernfs_open_file *of,
			      char *buf, size_t nbytes, loff_t off)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
	unsigned long max;
	int err;

	buf = strstrip(buf);
	err = page_counter_memparse(buf, "max", &max);
	if (err)
		return err;

	xchg(&memcg->swap.max, max);

	return nbytes;
}
static int swap_events_show(struct seq_file *m, void *v)
{
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	seq_printf(m, "high %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_HIGH]));
	seq_printf(m, "max %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_MAX]));
	seq_printf(m, "fail %lu\n",
		   atomic_long_read(&memcg->memory_events[MEMCG_SWAP_FAIL]));

	return 0;
}
static struct cftype swap_files[] = {
	{
		.name = "swap.current",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_u64 = swap_current_read,
	},
	{
		.name = "swap.high",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_high_show,
		.write = swap_high_write,
	},
	{
		.name = "swap.max",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = swap_max_show,
		.write = swap_max_write,
	},
	{
		.name = "swap.events",
		.flags = CFTYPE_NOT_ON_ROOT,
		.file_offset = offsetof(struct mem_cgroup, swap_events_file),
		.seq_show = swap_events_show,
	},
	{ }	/* terminate */
};
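
/*
 * Usage example (illustrative, not part of the original file): on the
 * unified (cgroup2) hierarchy these entries appear as memory.swap.*
 * files in each non-root cgroup directory, e.g.:
 *
 *	# echo 512M > /sys/fs/cgroup/mygroup/memory.swap.max
 *	# cat /sys/fs/cgroup/mygroup/memory.swap.current
 *
 * "mygroup" is a hypothetical cgroup name.
 */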
static struct cftype memsw_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write = mem_cgroup_write,
		.read_u64 = mem_cgroup_read_u64,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.write = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read_u64,
	},
	{ },	/* terminate */
};
/*
 * If mem_cgroup_swap_init() is implemented as a subsys_initcall()
 * instead of a core_initcall(), this could mean cgroup_memory_noswap still
 * remains set to false even when memcg is disabled via "cgroup_disable=memory"
 * boot parameter. This may result in premature OOPS inside
 * mem_cgroup_get_nr_swap_pages() function in corner cases.
 */
static int __init mem_cgroup_swap_init(void)
{
	/* No memory control -> no swap control */
	if (mem_cgroup_disabled())
		cgroup_memory_noswap = true;

	if (cgroup_memory_noswap)
		return 0;

	WARN_ON(cgroup_add_dfl_cftypes(&memory_cgrp_subsys, swap_files));
	WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, memsw_files));

	return 0;
}
core_initcall(mem_cgroup_swap_init);
#endif /* CONFIG_MEMCG_SWAP */