1 /* memcontrol.c - Memory Controller
3 * Copyright IBM Corporation, 2007
4 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
6 * Copyright 2007 OpenVZ SWsoft Inc
7 * Author: Pavel Emelianov <xemul@openvz.org>
10 * Copyright (C) 2009 Nokia Corporation
11 * Author: Kirill A. Shutemov
13 * Kernel Memory Controller
14 * Copyright (C) 2012 Parallels Inc. and Google Inc.
15 * Authors: Glauber Costa and Suleiman Souhlal
18 * Charge lifetime sanitation
19 * Lockless page tracking & accounting
20 * Unified hierarchy configuration model
21 * Copyright (C) 2015 Red Hat, Inc., Johannes Weiner
23 * This program is free software; you can redistribute it and/or modify
24 * it under the terms of the GNU General Public License as published by
25 * the Free Software Foundation; either version 2 of the License, or
26 * (at your option) any later version.
28 * This program is distributed in the hope that it will be useful,
29 * but WITHOUT ANY WARRANTY; without even the implied warranty of
30 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
31 * GNU General Public License for more details.
34 #include <linux/page_counter.h>
35 #include <linux/memcontrol.h>
36 #include <linux/cgroup.h>
38 #include <linux/hugetlb.h>
39 #include <linux/pagemap.h>
40 #include <linux/smp.h>
41 #include <linux/page-flags.h>
42 #include <linux/backing-dev.h>
43 #include <linux/bit_spinlock.h>
44 #include <linux/rcupdate.h>
45 #include <linux/limits.h>
46 #include <linux/export.h>
47 #include <linux/mutex.h>
48 #include <linux/rbtree.h>
49 #include <linux/slab.h>
50 #include <linux/swap.h>
51 #include <linux/swapops.h>
52 #include <linux/spinlock.h>
53 #include <linux/eventfd.h>
54 #include <linux/poll.h>
55 #include <linux/sort.h>
57 #include <linux/seq_file.h>
58 #include <linux/vmpressure.h>
59 #include <linux/mm_inline.h>
60 #include <linux/swap_cgroup.h>
61 #include <linux/cpu.h>
62 #include <linux/oom.h>
63 #include <linux/lockdep.h>
64 #include <linux/file.h>
65 #include <linux/tracehook.h>
69 #include <net/tcp_memcontrol.h>
72 #include <asm/uaccess.h>
74 #include <trace/events/vmscan.h>
76 struct cgroup_subsys memory_cgrp_subsys __read_mostly;
77 EXPORT_SYMBOL(memory_cgrp_subsys);
79 #define MEM_CGROUP_RECLAIM_RETRIES 5
80 static struct mem_cgroup *root_mem_cgroup __read_mostly;
81 struct cgroup_subsys_state *mem_cgroup_root_css __read_mostly;
83 /* Whether the swap controller is active */
84 #ifdef CONFIG_MEMCG_SWAP
85 int do_swap_account __read_mostly;
87 #define do_swap_account 0
90 static const char * const mem_cgroup_stat_names[] = {
100 static const char * const mem_cgroup_events_names[] = {
107 static const char * const mem_cgroup_lru_names[] = {
115 #define THRESHOLDS_EVENTS_TARGET 128
116 #define SOFTLIMIT_EVENTS_TARGET 1024
117 #define NUMAINFO_EVENTS_TARGET 1024
120 * Cgroups above their limits are maintained in an RB-tree, independent of
121 * their hierarchy representation
124 struct mem_cgroup_tree_per_zone {
125 struct rb_root rb_root;
129 struct mem_cgroup_tree_per_node {
130 struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
133 struct mem_cgroup_tree {
134 struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
137 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
140 struct mem_cgroup_eventfd_list {
141 struct list_head list;
142 struct eventfd_ctx *eventfd;
146 * cgroup_event represents events which userspace wants to receive.
148 struct mem_cgroup_event {
150 * memcg which the event belongs to.
152 struct mem_cgroup *memcg;
154 * eventfd to signal userspace about the event.
156 struct eventfd_ctx *eventfd;
158 * Each of these is stored in a list by the cgroup.
160 struct list_head list;
162 * register_event() callback will be used to add a new userspace
163 * waiter for changes related to this event. Use eventfd_signal()
164 * on eventfd to send notification to userspace.
166 int (*register_event)(struct mem_cgroup *memcg,
167 struct eventfd_ctx *eventfd, const char *args);
169 * unregister_event() callback will be called when userspace closes
170 * the eventfd or when the cgroup is removed. This callback must be set
171 * if you want to provide notification functionality.
173 void (*unregister_event)(struct mem_cgroup *memcg,
174 struct eventfd_ctx *eventfd);
176 * All fields below are needed to unregister the event when
177 * userspace closes the eventfd.
180 wait_queue_head_t *wqh;
182 struct work_struct remove;
185 static void mem_cgroup_threshold(struct mem_cgroup *memcg);
186 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
188 /* Stuff for moving charges at task migration. */
190 * Types of charges to be moved.
192 #define MOVE_ANON 0x1U
193 #define MOVE_FILE 0x2U
194 #define MOVE_MASK (MOVE_ANON | MOVE_FILE)
196 /* "mc" and its members are protected by cgroup_mutex */
197 static struct move_charge_struct {
198 spinlock_t lock; /* for from, to */
199 struct mem_cgroup *from;
200 struct mem_cgroup *to;
202 unsigned long precharge;
203 unsigned long moved_charge;
204 unsigned long moved_swap;
205 struct task_struct *moving_task; /* a task moving charges */
206 wait_queue_head_t waitq; /* a waitq for other context */
208 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
209 .waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
213 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
214 * limit reclaim to prevent infinite loops, if they ever occur.
216 #define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
217 #define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
220 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
221 MEM_CGROUP_CHARGE_TYPE_ANON,
222 MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
223 MEM_CGROUP_CHARGE_TYPE_DROP, /* a page was unused swap cache */
227 /* for encoding cft->private value on file */
235 #define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
236 #define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
237 #define MEMFILE_ATTR(val) ((val) & 0xffff)
238 /* Used for OOM notifier */
239 #define OOM_CONTROL (0)
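/*
 * Illustrative sketch (not part of the original file): cft->private packs
 * a resource type in the upper 16 bits and an attribute index in the
 * lower 16 bits.  With hypothetical values type = 2 and attr = 1:
 *
 *	MEMFILE_PRIVATE(2, 1)	== 0x20001
 *	MEMFILE_TYPE(0x20001)	== 2
 *	MEMFILE_ATTR(0x20001)	== 1
 */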
242 * The memcg_create_mutex will be held whenever a new cgroup is created.
243 * As a consequence, any change that needs to protect against new child cgroups
244 * appearing has to hold it as well.
246 static DEFINE_MUTEX(memcg_create_mutex);
248 /* Some nice accessors for the vmpressure. */
249 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
252 memcg = root_mem_cgroup;
253 return &memcg->vmpressure;
256 struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr)
258 return &container_of(vmpr, struct mem_cgroup, vmpressure)->css;
261 static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
263 return (memcg == root_mem_cgroup);
267 * We restrict the id in the range of [1, 65535], so it can fit into
270 #define MEM_CGROUP_ID_MAX USHRT_MAX
272 static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg)
274 return memcg->css.id;
278 * A helper function to get mem_cgroup from ID. Must be called under
279 * rcu_read_lock(). The caller is responsible for calling
280 * css_tryget_online() if the mem_cgroup is used for charging. (dropping
281 * refcnt from swap can be called against removed memcg.)
283 static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
285 struct cgroup_subsys_state *css;
287 css = css_from_id(id, &memory_cgrp_subsys);
288 return mem_cgroup_from_css(css);
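/*
 * Illustrative sketch (not part of the original file): the lookup pattern
 * described above, wrapped in a hypothetical helper.  The id would
 * typically come from a swap entry's swap_cgroup record.
 */
static inline struct mem_cgroup *memcg_lookup_for_charging(unsigned short id)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = mem_cgroup_from_id(id);
	if (memcg && !css_tryget_online(&memcg->css))
		memcg = NULL;	/* being removed, don't charge against it */
	rcu_read_unlock();
	return memcg;
}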
291 /* Writing them here to avoid exposing memcg's inner layout */
292 #if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
294 void sock_update_memcg(struct sock *sk)
296 if (mem_cgroup_sockets_enabled) {
297 struct mem_cgroup *memcg;
298 struct cg_proto *cg_proto;
300 BUG_ON(!sk->sk_prot->proto_cgroup);
302 /* Socket cloning can throw us here with sk_cgrp already
303 * filled. It won't, however, necessarily happen from
304 * process context. So the test for root memcg given
305 * the current task's memcg won't help us in this case.
307 * Respecting the original socket's memcg is a better
308 * decision in this case.
311 BUG_ON(mem_cgroup_is_root(sk->sk_cgrp->memcg));
312 css_get(&sk->sk_cgrp->memcg->css);
317 memcg = mem_cgroup_from_task(current);
318 cg_proto = sk->sk_prot->proto_cgroup(memcg);
319 if (cg_proto && test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags) &&
320 css_tryget_online(&memcg->css)) {
321 sk->sk_cgrp = cg_proto;
326 EXPORT_SYMBOL(sock_update_memcg);
328 void sock_release_memcg(struct sock *sk)
330 if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
331 struct mem_cgroup *memcg;
332 WARN_ON(!sk->sk_cgrp->memcg);
333 memcg = sk->sk_cgrp->memcg;
334 css_put(&sk->sk_cgrp->memcg->css);
338 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
340 if (!memcg || mem_cgroup_is_root(memcg))
343 return &memcg->tcp_mem;
345 EXPORT_SYMBOL(tcp_proto_cgroup);
349 #ifdef CONFIG_MEMCG_KMEM
351 * This will be the memcg's index in each cache's ->memcg_params.memcg_caches.
352 * The main reason for not using cgroup id for this:
353 * this works better in sparse environments, where we have a lot of memcgs,
354 * but only a few are kmem-limited. Also, if we have, for instance, 200
355 * memcgs, and none but the 200th is kmem-limited, we'd have to have a
356 * 200-entry array for that.
358 * The current size of the caches array is stored in memcg_nr_cache_ids. It
359 * will double each time we have to increase it.
361 static DEFINE_IDA(memcg_cache_ida);
362 int memcg_nr_cache_ids;
364 /* Protects memcg_nr_cache_ids */
365 static DECLARE_RWSEM(memcg_cache_ids_sem);
367 void memcg_get_cache_ids(void)
369 down_read(&memcg_cache_ids_sem);
372 void memcg_put_cache_ids(void)
374 up_read(&memcg_cache_ids_sem);
378 * MIN_SIZE is different from 1, because we would like to avoid going through
379 * the alloc/free process all the time. In a small machine, 4 kmem-limited
380 * cgroups is a reasonable guess. In the future, it could be a parameter or
381 * tunable, but that is strictly not necessary.
383 * MAX_SIZE should be as large as the number of cgrp_ids. Ideally, we could get
384 * this constant directly from cgroup, but it is understandable that this is
385 * better kept as an internal representation in cgroup.c. In any case, the
386 * cgrp_id space is not getting any smaller, and we don't have to necessarily
387 * increase ours as well if it increases.
389 #define MEMCG_CACHES_MIN_SIZE 4
390 #define MEMCG_CACHES_MAX_SIZE MEM_CGROUP_ID_MAX
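/*
 * Illustrative worked example (not part of the original file): per the
 * clamping in memcg_alloc_cache_id() below, if id 5 is handed out while
 * memcg_nr_cache_ids is still smaller, the arrays grow to
 *
 *	size = 2 * (5 + 1) = 12
 *
 * which already satisfies MEMCG_CACHES_MIN_SIZE (4) and is well below
 * MEMCG_CACHES_MAX_SIZE, so memcg_nr_cache_ids becomes 12 on success.
 */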
393 * A lot of the calls to the cache allocation functions are expected to be
394 * inlined by the compiler. Since the calls to memcg_kmem_get_cache are
395 * conditional to this static branch, we'll have to allow modules that do
396 * kmem_cache_alloc and the like to see this symbol as well
398 struct static_key memcg_kmem_enabled_key;
399 EXPORT_SYMBOL(memcg_kmem_enabled_key);
401 #endif /* CONFIG_MEMCG_KMEM */
403 static struct mem_cgroup_per_zone *
404 mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone)
406 int nid = zone_to_nid(zone);
407 int zid = zone_idx(zone);
409 return &memcg->nodeinfo[nid]->zoneinfo[zid];
413 * mem_cgroup_css_from_page - css of the memcg associated with a page
414 * @page: page of interest
416 * If memcg is bound to the default hierarchy, css of the memcg associated
417 * with @page is returned. The returned css remains associated with @page
418 * until it is released.
420 * If memcg is bound to a traditional hierarchy, the css of root_mem_cgroup
423 * XXX: The above description of behavior on the default hierarchy isn't
424 * strictly true yet as replace_page_cache_page() can modify the
425 * association before @page is released even on the default hierarchy;
426 * however, the current and planned usages don't mix the two functions
427 * and replace_page_cache_page() will soon be updated to make the invariant
430 struct cgroup_subsys_state *mem_cgroup_css_from_page(struct page *page)
432 struct mem_cgroup *memcg;
436 memcg = page->mem_cgroup;
438 if (!memcg || !cgroup_on_dfl(memcg->css.cgroup))
439 memcg = root_mem_cgroup;
446 * page_cgroup_ino - return inode number of the memcg a page is charged to
449 * Look up the closest online ancestor of the memory cgroup @page is charged to
450 * and return its inode number or 0 if @page is not charged to any cgroup. It
451 * is safe to call this function without holding a reference to @page.
453 * Note, this function is inherently racy, because there is nothing to prevent
454 * the cgroup inode from getting torn down and potentially reallocated a moment
455 * after page_cgroup_ino() returns, so it only should be used by callers that
456 * do not care (such as procfs interfaces).
458 ino_t page_cgroup_ino(struct page *page)
460 struct mem_cgroup *memcg;
461 unsigned long ino = 0;
464 memcg = READ_ONCE(page->mem_cgroup);
465 while (memcg && !(memcg->css.flags & CSS_ONLINE))
466 memcg = parent_mem_cgroup(memcg);
468 ino = cgroup_ino(memcg->css.cgroup);
473 static struct mem_cgroup_per_zone *
474 mem_cgroup_page_zoneinfo(struct mem_cgroup *memcg, struct page *page)
476 int nid = page_to_nid(page);
477 int zid = page_zonenum(page);
479 return &memcg->nodeinfo[nid]->zoneinfo[zid];
482 static struct mem_cgroup_tree_per_zone *
483 soft_limit_tree_node_zone(int nid, int zid)
485 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
488 static struct mem_cgroup_tree_per_zone *
489 soft_limit_tree_from_page(struct page *page)
491 int nid = page_to_nid(page);
492 int zid = page_zonenum(page);
494 return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
497 static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz,
498 struct mem_cgroup_tree_per_zone *mctz,
499 unsigned long new_usage_in_excess)
501 struct rb_node **p = &mctz->rb_root.rb_node;
502 struct rb_node *parent = NULL;
503 struct mem_cgroup_per_zone *mz_node;
508 mz->usage_in_excess = new_usage_in_excess;
509 if (!mz->usage_in_excess)
513 mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
515 if (mz->usage_in_excess < mz_node->usage_in_excess)
518 * We can't avoid mem cgroups that are over their soft
519 * limit by the same amount
521 else if (mz->usage_in_excess >= mz_node->usage_in_excess)
524 rb_link_node(&mz->tree_node, parent, p);
525 rb_insert_color(&mz->tree_node, &mctz->rb_root);
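/*
 * Illustrative note (not part of the original file): the tree is ordered
 * by usage_in_excess, smallest on the left.  With excesses {10, 40, 25}
 * the in-order walk is 10, 25, 40, so rb_last() in
 * __mem_cgroup_largest_soft_limit_node() below picks the memcg that is
 * furthest over its soft limit first.
 */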
529 static void __mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
530 struct mem_cgroup_tree_per_zone *mctz)
534 rb_erase(&mz->tree_node, &mctz->rb_root);
538 static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz,
539 struct mem_cgroup_tree_per_zone *mctz)
543 spin_lock_irqsave(&mctz->lock, flags);
544 __mem_cgroup_remove_exceeded(mz, mctz);
545 spin_unlock_irqrestore(&mctz->lock, flags);
548 static unsigned long soft_limit_excess(struct mem_cgroup *memcg)
550 unsigned long nr_pages = page_counter_read(&memcg->memory);
551 unsigned long soft_limit = READ_ONCE(memcg->soft_limit);
552 unsigned long excess = 0;
554 if (nr_pages > soft_limit)
555 excess = nr_pages - soft_limit;
560 static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page)
562 unsigned long excess;
563 struct mem_cgroup_per_zone *mz;
564 struct mem_cgroup_tree_per_zone *mctz;
566 mctz = soft_limit_tree_from_page(page);
568 * Necessary to update all ancestors when hierarchy is used,
569 * because their event counter is not touched.
571 for (; memcg; memcg = parent_mem_cgroup(memcg)) {
572 mz = mem_cgroup_page_zoneinfo(memcg, page);
573 excess = soft_limit_excess(memcg);
575 * We have to update the tree if mz is on RB-tree or
576 * mem is over its softlimit.
578 if (excess || mz->on_tree) {
581 spin_lock_irqsave(&mctz->lock, flags);
582 /* if on-tree, remove it */
584 __mem_cgroup_remove_exceeded(mz, mctz);
586 * Insert again. mz->usage_in_excess will be updated.
587 * If excess is 0, no tree ops.
589 __mem_cgroup_insert_exceeded(mz, mctz, excess);
590 spin_unlock_irqrestore(&mctz->lock, flags);
595 static void mem_cgroup_remove_from_trees(struct mem_cgroup *memcg)
597 struct mem_cgroup_tree_per_zone *mctz;
598 struct mem_cgroup_per_zone *mz;
602 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
603 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
604 mctz = soft_limit_tree_node_zone(nid, zid);
605 mem_cgroup_remove_exceeded(mz, mctz);
610 static struct mem_cgroup_per_zone *
611 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
613 struct rb_node *rightmost = NULL;
614 struct mem_cgroup_per_zone *mz;
618 rightmost = rb_last(&mctz->rb_root);
620 goto done; /* Nothing to reclaim from */
622 mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
624 * Remove the node now but someone else can add it back,
625 * we will add it back at the end of reclaim to its correct
626 * position in the tree.
628 __mem_cgroup_remove_exceeded(mz, mctz);
629 if (!soft_limit_excess(mz->memcg) ||
630 !css_tryget_online(&mz->memcg->css))
636 static struct mem_cgroup_per_zone *
637 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
639 struct mem_cgroup_per_zone *mz;
641 spin_lock_irq(&mctz->lock);
642 mz = __mem_cgroup_largest_soft_limit_node(mctz);
643 spin_unlock_irq(&mctz->lock);
648 * Return the page count for a single (non-recursive) @memcg.
650 * Implementation Note: reading percpu statistics for memcg.
652 * Both vmstat[] and percpu_counter have a threshold and do periodic
653 * synchronization to implement a "quick" read. There is a trade-off between
654 * reading cost and precision of the value. So we may have a chance to implement
655 * a periodic synchronization of the counters in memcg as well.
657 * But this _read() function is used for the user interface now. The user accounts
658 * memory usage by memory cgroup and _always_ requires an exact value because
659 * of that accounting. Even if we provided a quick-and-fuzzy read, we would always
660 * have to visit all online cpus and sum them up. So, for now, unnecessary
661 * synchronization is not implemented. (It is only implemented for cpu hotplug.)
663 * If there are kernel-internal actions which can make use of a not-exact
664 * value, and reading all cpu values can be a performance bottleneck in some
665 * common workload, a threshold and synchronization like vmstat[]'s should be
669 mem_cgroup_read_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx)
674 /* Per-cpu values can be negative, use a signed accumulator */
675 for_each_possible_cpu(cpu)
676 val += per_cpu(memcg->stat->count[idx], cpu);
678 * Summing races with updates, so val may be negative. Avoid exposing
679 * transient negative values.
686 static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
687 enum mem_cgroup_events_index idx)
689 unsigned long val = 0;
692 for_each_possible_cpu(cpu)
693 val += per_cpu(memcg->stat->events[idx], cpu);
697 static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
702 * Here, RSS means 'mapped anon' and anon's SwapCache. Shmem/tmpfs is
703 * counted as CACHE even if it's on ANON LRU.
706 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS],
709 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_CACHE],
712 if (PageTransHuge(page))
713 __this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
716 /* pagein of a big page is an event. So, ignore page size */
718 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
720 __this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
721 nr_pages = -nr_pages; /* for event */
724 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
727 static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
729 unsigned int lru_mask)
731 unsigned long nr = 0;
734 VM_BUG_ON((unsigned)nid >= nr_node_ids);
736 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
737 struct mem_cgroup_per_zone *mz;
741 if (!(BIT(lru) & lru_mask))
743 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
744 nr += mz->lru_size[lru];
750 static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg,
751 unsigned int lru_mask)
753 unsigned long nr = 0;
756 for_each_node_state(nid, N_MEMORY)
757 nr += mem_cgroup_node_nr_lru_pages(memcg, nid, lru_mask);
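/*
 * Illustrative usage (not part of the original file): @lru_mask selects
 * which LRU lists are summed, one bit per enum lru_list entry, e.g.
 *
 *	mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON))
 *	mem_cgroup_nr_lru_pages(memcg, LRU_ALL_FILE)
 *	mem_cgroup_nr_lru_pages(memcg, LRU_ALL)
 */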
761 static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
762 enum mem_cgroup_events_target target)
764 unsigned long val, next;
766 val = __this_cpu_read(memcg->stat->nr_page_events);
767 next = __this_cpu_read(memcg->stat->targets[target]);
768 /* from time_after() in jiffies.h */
769 if ((long)next - (long)val < 0) {
771 case MEM_CGROUP_TARGET_THRESH:
772 next = val + THRESHOLDS_EVENTS_TARGET;
774 case MEM_CGROUP_TARGET_SOFTLIMIT:
775 next = val + SOFTLIMIT_EVENTS_TARGET;
777 case MEM_CGROUP_TARGET_NUMAINFO:
778 next = val + NUMAINFO_EVENTS_TARGET;
783 __this_cpu_write(memcg->stat->targets[target], next);
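/*
 * Illustrative worked example (not part of the original file): the signed
 * subtraction above is the same wraparound-safe trick as time_after().
 * With the per-cpu event counter just below the wrap point:
 *
 *	val  = ULONG_MAX - 5
 *	next = val + 128		(wraps to 122)
 *
 *	(long)next - (long)val == 128	-> target not due yet
 *
 * and once val itself wraps and reaches 130:
 *
 *	(long)next - (long)val == -8	-> target due, fire and rearm
 */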
790 * Check events in order.
793 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
795 /* threshold event is triggered in finer grain than soft limit */
796 if (unlikely(mem_cgroup_event_ratelimit(memcg,
797 MEM_CGROUP_TARGET_THRESH))) {
799 bool do_numainfo __maybe_unused;
801 do_softlimit = mem_cgroup_event_ratelimit(memcg,
802 MEM_CGROUP_TARGET_SOFTLIMIT);
804 do_numainfo = mem_cgroup_event_ratelimit(memcg,
805 MEM_CGROUP_TARGET_NUMAINFO);
807 mem_cgroup_threshold(memcg);
808 if (unlikely(do_softlimit))
809 mem_cgroup_update_tree(memcg, page);
811 if (unlikely(do_numainfo))
812 atomic_inc(&memcg->numainfo_events);
817 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
820 * mm_update_next_owner() may clear mm->owner to NULL
821 * if it races with swapoff, page migration, etc.
822 * So this can be called with p == NULL.
827 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
829 EXPORT_SYMBOL(mem_cgroup_from_task);
831 static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
833 struct mem_cgroup *memcg = NULL;
838 * Page cache insertions can happen without an
839 * actual mm context, e.g. during disk probing
840 * on boot, loopback IO, acct() writes etc.
843 memcg = root_mem_cgroup;
845 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
846 if (unlikely(!memcg))
847 memcg = root_mem_cgroup;
849 } while (!css_tryget_online(&memcg->css));
855 * mem_cgroup_iter - iterate over memory cgroup hierarchy
856 * @root: hierarchy root
857 * @prev: previously returned memcg, NULL on first invocation
858 * @reclaim: cookie for shared reclaim walks, NULL for full walks
860 * Returns references to children of the hierarchy below @root, or
861 * @root itself, or %NULL after a full round-trip.
863 * Caller must pass the return value in @prev on subsequent
864 * invocations for reference counting, or use mem_cgroup_iter_break()
865 * to cancel a hierarchy walk before the round-trip is complete.
867 * Reclaimers can specify a zone and a priority level in @reclaim to
868 * divide up the memcgs in the hierarchy among all concurrent
869 * reclaimers operating on the same zone and priority.
871 struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
872 struct mem_cgroup *prev,
873 struct mem_cgroup_reclaim_cookie *reclaim)
875 struct mem_cgroup_reclaim_iter *uninitialized_var(iter);
876 struct cgroup_subsys_state *css = NULL;
877 struct mem_cgroup *memcg = NULL;
878 struct mem_cgroup *pos = NULL;
880 if (mem_cgroup_disabled())
884 root = root_mem_cgroup;
886 if (prev && !reclaim)
889 if (!root->use_hierarchy && root != root_mem_cgroup) {
898 struct mem_cgroup_per_zone *mz;
900 mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone);
901 iter = &mz->iter[reclaim->priority];
903 if (prev && reclaim->generation != iter->generation)
907 pos = READ_ONCE(iter->position);
909 * A racing update may change the position and
910 * put the last reference, hence css_tryget(),
911 * or retry to see the updated position.
913 } while (pos && !css_tryget(&pos->css));
920 css = css_next_descendant_pre(css, &root->css);
923 * Reclaimers share the hierarchy walk, and a
924 * new one might jump in right at the end of
925 * the hierarchy - make sure they see at least
926 * one group and restart from the beginning.
934 * Verify the css and acquire a reference. The root
935 * is provided by the caller, so we know it's alive
936 * and kicking, and don't take an extra reference.
938 memcg = mem_cgroup_from_css(css);
940 if (css == &root->css)
943 if (css_tryget(css)) {
945 * Make sure the memcg is initialized:
946 * mem_cgroup_css_online() orders the
947 * initialization against setting the flag.
949 if (smp_load_acquire(&memcg->initialized))
959 if (cmpxchg(&iter->position, pos, memcg) == pos) {
961 css_get(&memcg->css);
967 * pairs with css_tryget when dereferencing iter->position
976 reclaim->generation = iter->generation;
982 if (prev && prev != root)
989 * mem_cgroup_iter_break - abort a hierarchy walk prematurely
990 * @root: hierarchy root
991 * @prev: last visited hierarchy member as returned by mem_cgroup_iter()
993 void mem_cgroup_iter_break(struct mem_cgroup *root,
994 struct mem_cgroup *prev)
997 root = root_mem_cgroup;
998 if (prev && prev != root)
1003 * Iteration constructs for visiting all cgroups (under a tree). If
1004 * loops are exited prematurely (break), mem_cgroup_iter_break() must
1005 * be used for reference counting.
1007 #define for_each_mem_cgroup_tree(iter, root) \
1008 for (iter = mem_cgroup_iter(root, NULL, NULL); \
1010 iter = mem_cgroup_iter(root, iter, NULL))
1012 #define for_each_mem_cgroup(iter) \
1013 for (iter = mem_cgroup_iter(NULL, NULL, NULL); \
1015 iter = mem_cgroup_iter(NULL, iter, NULL))
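/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * walker showing the break protocol required above.  Leaving the loop
 * early without mem_cgroup_iter_break() would leak the css reference
 * held on the current iteration position.
 */
static inline bool memcg_any_under_oom(struct mem_cgroup *root)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, root) {
		if (iter->under_oom) {
			mem_cgroup_iter_break(root, iter);
			return true;
		}
	}
	return false;
}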
1018 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1019 * @zone: zone of the wanted lruvec
1020 * @memcg: memcg of the wanted lruvec
1022 * Returns the lru list vector holding pages for the given @zone and
1023 * @memcg. This can be the global zone lruvec, if the memory controller
1026 struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1027 struct mem_cgroup *memcg)
1029 struct mem_cgroup_per_zone *mz;
1030 struct lruvec *lruvec;
1032 if (mem_cgroup_disabled()) {
1033 lruvec = &zone->lruvec;
1037 mz = mem_cgroup_zone_zoneinfo(memcg, zone);
1038 lruvec = &mz->lruvec;
1041 * Since a node can be onlined after the mem_cgroup was created,
1042 * we have to be prepared to initialize lruvec->zone here;
1043 * and if offlined then reonlined, we need to reinitialize it.
1045 if (unlikely(lruvec->zone != zone))
1046 lruvec->zone = zone;
1051 * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page
1053 * @zone: zone of the page
1055 * This function is only safe when following the LRU page isolation
1056 * and putback protocol: the LRU lock must be held, and the page must
1057 * either be PageLRU() or the caller must have isolated/allocated it.
1059 struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
1061 struct mem_cgroup_per_zone *mz;
1062 struct mem_cgroup *memcg;
1063 struct lruvec *lruvec;
1065 if (mem_cgroup_disabled()) {
1066 lruvec = &zone->lruvec;
1070 memcg = page->mem_cgroup;
1072 * Swapcache readahead pages are added to the LRU - and
1073 * possibly migrated - before they are charged.
1076 memcg = root_mem_cgroup;
1078 mz = mem_cgroup_page_zoneinfo(memcg, page);
1079 lruvec = &mz->lruvec;
1082 * Since a node can be onlined after the mem_cgroup was created,
1083 * we have to be prepared to initialize lruvec->zone here;
1084 * and if offlined then reonlined, we need to reinitialize it.
1086 if (unlikely(lruvec->zone != zone))
1087 lruvec->zone = zone;
1092 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1093 * @lruvec: mem_cgroup per zone lru vector
1094 * @lru: index of lru list the page is sitting on
1095 * @nr_pages: positive when adding or negative when removing
1097 * This function must be called when a page is added to or removed from an
1100 void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1103 struct mem_cgroup_per_zone *mz;
1104 unsigned long *lru_size;
1106 if (mem_cgroup_disabled())
1109 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1110 lru_size = mz->lru_size + lru;
1111 *lru_size += nr_pages;
1112 VM_BUG_ON((long)(*lru_size) < 0);
1115 bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg)
1117 struct mem_cgroup *task_memcg;
1118 struct task_struct *p;
1121 p = find_lock_task_mm(task);
1123 task_memcg = get_mem_cgroup_from_mm(p->mm);
1127 * All threads may have already detached their mm's, but the oom
1128 * killer still needs to detect if they have already been oom
1129 * killed to prevent needlessly killing additional tasks.
1132 task_memcg = mem_cgroup_from_task(task);
1133 css_get(&task_memcg->css);
1136 ret = mem_cgroup_is_descendant(task_memcg, memcg);
1137 css_put(&task_memcg->css);
1141 #define mem_cgroup_from_counter(counter, member) \
1142 container_of(counter, struct mem_cgroup, member)
1145 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1146 * @memcg: the memory cgroup
1148 * Returns the maximum amount of memory @memcg can be charged with, in
1151 static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
1153 unsigned long margin = 0;
1154 unsigned long count;
1155 unsigned long limit;
1157 count = page_counter_read(&memcg->memory);
1158 limit = READ_ONCE(memcg->memory.limit);
1160 margin = limit - count;
1162 if (do_swap_account) {
1163 count = page_counter_read(&memcg->memsw);
1164 limit = READ_ONCE(memcg->memsw.limit);
1166 margin = min(margin, limit - count);
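/*
 * Illustrative worked example (not part of the original file), assuming
 * swap accounting is enabled:
 *
 *	memory: limit 1000 pages, usage  800	-> headroom 200
 *	memsw:  limit 1200 pages, usage 1150	-> headroom  50
 *
 * The margin is min(200, 50) = 50: only 50 more pages can be charged
 * before one of the counters hits its limit.
 */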
1173 * A routine for checking whether "mem" is under move_account() or not.
1175 * Checking whether a cgroup is mc.from or mc.to or under the hierarchy of
1176 * moving cgroups. This is for waiting at high memory pressure
1179 static bool mem_cgroup_under_move(struct mem_cgroup *memcg)
1181 struct mem_cgroup *from;
1182 struct mem_cgroup *to;
1185 * Unlike task_move routines, we access mc.to and mc.from without
1186 * mutual exclusion by cgroup_mutex. Here, we take the spinlock instead.
1188 spin_lock(&mc.lock);
1194 ret = mem_cgroup_is_descendant(from, memcg) ||
1195 mem_cgroup_is_descendant(to, memcg);
1197 spin_unlock(&mc.lock);
1201 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg)
1203 if (mc.moving_task && current != mc.moving_task) {
1204 if (mem_cgroup_under_move(memcg)) {
1206 prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1207 /* moving charge context might have finished. */
1210 finish_wait(&mc.waitq, &wait);
1217 #define K(x) ((x) << (PAGE_SHIFT-10))
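/*
 * Illustrative note (not part of the original file): with 4K pages,
 * PAGE_SHIFT is 12, so K(x) == x << 2; e.g. K(300) pages == 1200,
 * printed by the pr_info() calls below as "... 1200kB".
 */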
1219 * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller.
1220 * @memcg: The memory cgroup that went over limit
1221 * @p: Task that is going to be killed
1223 * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1226 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1228 /* oom_info_lock ensures that parallel ooms do not interleave */
1229 static DEFINE_MUTEX(oom_info_lock);
1230 struct mem_cgroup *iter;
1233 mutex_lock(&oom_info_lock);
1237 pr_info("Task in ");
1238 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1239 pr_cont(" killed as a result of limit of ");
1241 pr_info("Memory limit reached of cgroup ");
1244 pr_cont_cgroup_path(memcg->css.cgroup);
1249 pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n",
1250 K((u64)page_counter_read(&memcg->memory)),
1251 K((u64)memcg->memory.limit), memcg->memory.failcnt);
1252 pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n",
1253 K((u64)page_counter_read(&memcg->memsw)),
1254 K((u64)memcg->memsw.limit), memcg->memsw.failcnt);
1255 pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n",
1256 K((u64)page_counter_read(&memcg->kmem)),
1257 K((u64)memcg->kmem.limit), memcg->kmem.failcnt);
1259 for_each_mem_cgroup_tree(iter, memcg) {
1260 pr_info("Memory cgroup stats for ");
1261 pr_cont_cgroup_path(iter->css.cgroup);
1264 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
1265 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
1267 pr_cont(" %s:%luKB", mem_cgroup_stat_names[i],
1268 K(mem_cgroup_read_stat(iter, i)));
1271 for (i = 0; i < NR_LRU_LISTS; i++)
1272 pr_cont(" %s:%luKB", mem_cgroup_lru_names[i],
1273 K(mem_cgroup_nr_lru_pages(iter, BIT(i))));
1277 mutex_unlock(&oom_info_lock);
1281 * This function returns the number of memcgs under the hierarchy tree. Returns
1282 * 1 (self count) if there are no children.
1284 static int mem_cgroup_count_children(struct mem_cgroup *memcg)
1287 struct mem_cgroup *iter;
1289 for_each_mem_cgroup_tree(iter, memcg)
1295 * Return the memory (and swap, if configured) limit for a memcg.
1297 static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg)
1299 unsigned long limit;
1301 limit = memcg->memory.limit;
1302 if (mem_cgroup_swappiness(memcg)) {
1303 unsigned long memsw_limit;
1305 memsw_limit = memcg->memsw.limit;
1306 limit = min(limit + total_swap_pages, memsw_limit);
1311 static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
1314 struct oom_control oc = {
1317 .gfp_mask = gfp_mask,
1320 struct mem_cgroup *iter;
1321 unsigned long chosen_points = 0;
1322 unsigned long totalpages;
1323 unsigned int points = 0;
1324 struct task_struct *chosen = NULL;
1326 mutex_lock(&oom_lock);
1329 * If current has a pending SIGKILL or is exiting, then automatically
1330 * select it. The goal is to allow it to allocate so that it may
1331 * quickly exit and free its memory.
1333 if (fatal_signal_pending(current) || task_will_free_mem(current)) {
1334 mark_oom_victim(current);
1338 check_panic_on_oom(&oc, CONSTRAINT_MEMCG, memcg);
1339 totalpages = mem_cgroup_get_limit(memcg) ? : 1;
1340 for_each_mem_cgroup_tree(iter, memcg) {
1341 struct css_task_iter it;
1342 struct task_struct *task;
1344 css_task_iter_start(&iter->css, &it);
1345 while ((task = css_task_iter_next(&it))) {
1346 switch (oom_scan_process_thread(&oc, task, totalpages)) {
1347 case OOM_SCAN_SELECT:
1349 put_task_struct(chosen);
1351 chosen_points = ULONG_MAX;
1352 get_task_struct(chosen);
1354 case OOM_SCAN_CONTINUE:
1356 case OOM_SCAN_ABORT:
1357 css_task_iter_end(&it);
1358 mem_cgroup_iter_break(memcg, iter);
1360 put_task_struct(chosen);
1365 points = oom_badness(task, memcg, NULL, totalpages);
1366 if (!points || points < chosen_points)
1368 /* Prefer thread group leaders for display purposes */
1369 if (points == chosen_points &&
1370 thread_group_leader(chosen))
1374 put_task_struct(chosen);
1376 chosen_points = points;
1377 get_task_struct(chosen);
1379 css_task_iter_end(&it);
1383 points = chosen_points * 1000 / totalpages;
1384 oom_kill_process(&oc, chosen, points, totalpages, memcg,
1385 "Memory cgroup out of memory");
1388 mutex_unlock(&oom_lock);
1391 #if MAX_NUMNODES > 1
1394 * test_mem_cgroup_node_reclaimable
1395 * @memcg: the target memcg
1396 * @nid: the node ID to be checked.
1397 * @noswap: specify true here if the user wants file only information.
1399 * This function returns whether the specified memcg contains any
1400 * reclaimable pages on a node. Returns true if there are any reclaimable
1401 * pages in the node.
1403 static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *memcg,
1404 int nid, bool noswap)
1406 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_FILE))
1408 if (noswap || !total_swap_pages)
1410 if (mem_cgroup_node_nr_lru_pages(memcg, nid, LRU_ALL_ANON))
1417 * Always updating the nodemask is not very good - even if we have an empty
1418 * list or the wrong list here, we can start from some node and traverse all
1419 * nodes based on the zonelist. So update the list loosely once per 10 secs.
1422 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *memcg)
1426 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
1427 * pagein/pageout changes since the last update.
1429 if (!atomic_read(&memcg->numainfo_events))
1431 if (atomic_inc_return(&memcg->numainfo_updating) > 1)
1434 /* make a nodemask where this memcg uses memory from */
1435 memcg->scan_nodes = node_states[N_MEMORY];
1437 for_each_node_mask(nid, node_states[N_MEMORY]) {
1439 if (!test_mem_cgroup_node_reclaimable(memcg, nid, false))
1440 node_clear(nid, memcg->scan_nodes);
1443 atomic_set(&memcg->numainfo_events, 0);
1444 atomic_set(&memcg->numainfo_updating, 0);
1448 * Select a node to start reclaim from. Because what we need is just
1449 * reducing the usage counter, starting from anywhere is O.K. Considering
1450 * memory reclaim from the current node, there are pros and cons.
1452 * Freeing memory from the current node means freeing memory from a node which
1453 * we'll use or we've used. So, it may make the LRU bad. And if several threads
1454 * hit their limits, we will see contention on a node. But freeing from a remote
1455 * node means more cost for memory reclaim because of memory latency.
1457 * For now, we use round-robin. A better algorithm is welcome.
1459 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1463 mem_cgroup_may_update_nodemask(memcg);
1464 node = memcg->last_scanned_node;
1466 node = next_node(node, memcg->scan_nodes);
1467 if (node == MAX_NUMNODES)
1468 node = first_node(memcg->scan_nodes);
1470 * We call this when we hit the limit, not when pages are added to the LRU.
1471 * No LRU may hold pages because all pages are UNEVICTABLE or the
1472 * memcg is too small and no pages are on the LRU. In that case,
1473 * we use the current node.
1475 if (unlikely(node == MAX_NUMNODES))
1476 node = numa_node_id();
1478 memcg->last_scanned_node = node;
1482 int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1488 static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
1491 unsigned long *total_scanned)
1493 struct mem_cgroup *victim = NULL;
1496 unsigned long excess;
1497 unsigned long nr_scanned;
1498 struct mem_cgroup_reclaim_cookie reclaim = {
1503 excess = soft_limit_excess(root_memcg);
1506 victim = mem_cgroup_iter(root_memcg, victim, &reclaim);
1511 * If we have not been able to reclaim
1512 * anything, it might be because there are
1513 * no reclaimable pages under this hierarchy
1518 * We want to do more targeted reclaim.
1519 * excess >> 2 is not too excessive, so we do not
1520 * reclaim too much, nor so small that we keep
1521 * coming back to reclaim from this cgroup
1523 if (total >= (excess >> 2) ||
1524 (loop > MEM_CGROUP_MAX_RECLAIM_LOOPS))
1529 total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false,
1531 *total_scanned += nr_scanned;
1532 if (!soft_limit_excess(root_memcg))
1535 mem_cgroup_iter_break(root_memcg, victim);
1539 #ifdef CONFIG_LOCKDEP
1540 static struct lockdep_map memcg_oom_lock_dep_map = {
1541 .name = "memcg_oom_lock",
1545 static DEFINE_SPINLOCK(memcg_oom_lock);
1548 * Check whether the OOM killer is already running under our hierarchy.
1549 * If someone is running, return false.
1551 static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
1553 struct mem_cgroup *iter, *failed = NULL;
1555 spin_lock(&memcg_oom_lock);
1557 for_each_mem_cgroup_tree(iter, memcg) {
1558 if (iter->oom_lock) {
1560 * this subtree of our hierarchy is already locked
1561 * so we cannot give a lock.
1564 mem_cgroup_iter_break(memcg, iter);
1567 iter->oom_lock = true;
1572 * OK, we failed to lock the whole subtree so we have
1573 * to clean up what we set up, up to the failing subtree
1575 for_each_mem_cgroup_tree(iter, memcg) {
1576 if (iter == failed) {
1577 mem_cgroup_iter_break(memcg, iter);
1580 iter->oom_lock = false;
1583 mutex_acquire(&memcg_oom_lock_dep_map, 0, 1, _RET_IP_);
1585 spin_unlock(&memcg_oom_lock);
1590 static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
1592 struct mem_cgroup *iter;
1594 spin_lock(&memcg_oom_lock);
1595 mutex_release(&memcg_oom_lock_dep_map, 1, _RET_IP_);
1596 for_each_mem_cgroup_tree(iter, memcg)
1597 iter->oom_lock = false;
1598 spin_unlock(&memcg_oom_lock);
1601 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
1603 struct mem_cgroup *iter;
1605 spin_lock(&memcg_oom_lock);
1606 for_each_mem_cgroup_tree(iter, memcg)
1608 spin_unlock(&memcg_oom_lock);
1611 static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
1613 struct mem_cgroup *iter;
1616 * When a new child is created while the hierarchy is under oom,
1617 * mem_cgroup_oom_lock() may not be called. Watch for underflow.
1619 spin_lock(&memcg_oom_lock);
1620 for_each_mem_cgroup_tree(iter, memcg)
1621 if (iter->under_oom > 0)
1623 spin_unlock(&memcg_oom_lock);
1626 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1628 struct oom_wait_info {
1629 struct mem_cgroup *memcg;
1633 static int memcg_oom_wake_function(wait_queue_t *wait,
1634 unsigned mode, int sync, void *arg)
1636 struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg;
1637 struct mem_cgroup *oom_wait_memcg;
1638 struct oom_wait_info *oom_wait_info;
1640 oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1641 oom_wait_memcg = oom_wait_info->memcg;
1643 if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) &&
1644 !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg))
1646 return autoremove_wake_function(wait, mode, sync, arg);
1649 static void memcg_oom_recover(struct mem_cgroup *memcg)
1652 * For the following lockless ->under_oom test, the only required
1653 * guarantee is that it must see the state asserted by an OOM when
1654 * this function is called as a result of userland actions
1655 * triggered by the notification of the OOM. This is trivially
1656 * achieved by invoking mem_cgroup_mark_under_oom() before
1657 * triggering notification.
1659 if (memcg && memcg->under_oom)
1660 __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
1663 static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
1665 if (!current->memcg_may_oom)
1668 * We are in the middle of the charge context here, so we
1669 * don't want to block when potentially sitting on a callstack
1670 * that holds all kinds of filesystem and mm locks.
1672 * Also, the caller may handle a failed allocation gracefully
1673 * (like optional page cache readahead) and so an OOM killer
1674 * invocation might not even be necessary.
1676 * That's why we don't do anything here except remember the
1677 * OOM context and then deal with it at the end of the page
1678 * fault when the stack is unwound, the locks are released,
1679 * and when we know whether the fault was overall successful.
1681 css_get(&memcg->css);
1682 current->memcg_in_oom = memcg;
1683 current->memcg_oom_gfp_mask = mask;
1684 current->memcg_oom_order = order;
1688 * mem_cgroup_oom_synchronize - complete memcg OOM handling
1689 * @handle: actually kill/wait or just clean up the OOM state
1691 * This has to be called at the end of a page fault if the memcg OOM
1692 * handler was enabled.
1694 * Memcg supports userspace OOM handling where failed allocations must
1695 * sleep on a waitqueue until the userspace task resolves the
1696 * situation. Sleeping directly in the charge context with all kinds
1697 * of locks held is not a good idea, instead we remember an OOM state
1698 * in the task and mem_cgroup_oom_synchronize() has to be called at
1699 * the end of the page fault to complete the OOM handling.
1701 * Returns %true if an ongoing memcg OOM situation was detected and
1702 * completed, %false otherwise.
1704 bool mem_cgroup_oom_synchronize(bool handle)
1706 struct mem_cgroup *memcg = current->memcg_in_oom;
1707 struct oom_wait_info owait;
1710 /* OOM is global, do not handle */
1714 if (!handle || oom_killer_disabled)
1717 owait.memcg = memcg;
1718 owait.wait.flags = 0;
1719 owait.wait.func = memcg_oom_wake_function;
1720 owait.wait.private = current;
1721 INIT_LIST_HEAD(&owait.wait.task_list);
1723 prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1724 mem_cgroup_mark_under_oom(memcg);
1726 locked = mem_cgroup_oom_trylock(memcg);
1729 mem_cgroup_oom_notify(memcg);
1731 if (locked && !memcg->oom_kill_disable) {
1732 mem_cgroup_unmark_under_oom(memcg);
1733 finish_wait(&memcg_oom_waitq, &owait.wait);
1734 mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask,
1735 current->memcg_oom_order);
1738 mem_cgroup_unmark_under_oom(memcg);
1739 finish_wait(&memcg_oom_waitq, &owait.wait);
1743 mem_cgroup_oom_unlock(memcg);
1745 * There is no guarantee that an OOM-lock contender
1746 * sees the wakeups triggered by the OOM kill
1747 * uncharges. Wake any sleepers explicitly.
1749 memcg_oom_recover(memcg);
1752 current->memcg_in_oom = NULL;
1753 css_put(&memcg->css);
1758 * mem_cgroup_begin_page_stat - begin a page state statistics transaction
1759 * @page: page that is going to change accounted state
1761 * This function must mark the beginning of an accounted page state
1762 * change to prevent double accounting when the page is concurrently
1763 * being moved to another memcg:
1765 * memcg = mem_cgroup_begin_page_stat(page);
1766 * if (TestClearPageState(page))
1767 * mem_cgroup_update_page_stat(memcg, state, -1);
1768 * mem_cgroup_end_page_stat(memcg);
1770 struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page)
1772 struct mem_cgroup *memcg;
1773 unsigned long flags;
1776 * The RCU lock is held throughout the transaction. The fast
1777 * path can get away without acquiring the memcg->move_lock
1778 * because page moving starts with an RCU grace period.
1780 * The RCU lock also protects the memcg from being freed when
1781 * the page state that is going to change is the only thing
1782 * preventing the page from being uncharged.
1783 * E.g. end-writeback clearing PageWriteback(), which allows
1784 * migration to go ahead and uncharge the page before the
1785 * account transaction might be complete.
1789 if (mem_cgroup_disabled())
1792 memcg = page->mem_cgroup;
1793 if (unlikely(!memcg))
1796 if (atomic_read(&memcg->moving_account) <= 0)
1799 spin_lock_irqsave(&memcg->move_lock, flags);
1800 if (memcg != page->mem_cgroup) {
1801 spin_unlock_irqrestore(&memcg->move_lock, flags);
1806 * When charge migration first begins, we can have locked and
1807 * unlocked page stat updates happening concurrently. Track
1808 * the task who has the lock for mem_cgroup_end_page_stat().
1810 memcg->move_lock_task = current;
1811 memcg->move_lock_flags = flags;
1815 EXPORT_SYMBOL(mem_cgroup_begin_page_stat);
1818 * mem_cgroup_end_page_stat - finish a page state statistics transaction
1819 * @memcg: the memcg that was accounted against
1821 void mem_cgroup_end_page_stat(struct mem_cgroup *memcg)
1823 if (memcg && memcg->move_lock_task == current) {
1824 unsigned long flags = memcg->move_lock_flags;
1826 memcg->move_lock_task = NULL;
1827 memcg->move_lock_flags = 0;
1829 spin_unlock_irqrestore(&memcg->move_lock, flags);
1834 EXPORT_SYMBOL(mem_cgroup_end_page_stat);
1837 * size of first charge trial. "32" comes from vmscan.c's magic value.
1838 * TODO: it may be necessary to use bigger numbers on big iron.
1840 #define CHARGE_BATCH 32U
1841 struct memcg_stock_pcp {
1842 struct mem_cgroup *cached; /* this is never the root cgroup */
1843 unsigned int nr_pages;
1844 struct work_struct work;
1845 unsigned long flags;
1846 #define FLUSHING_CACHED_CHARGE 0
1848 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1849 static DEFINE_MUTEX(percpu_charge_mutex);
1852 * consume_stock: Try to consume stocked charge on this cpu.
1853 * @memcg: memcg to consume from.
1854 * @nr_pages: how many pages to charge.
1856 * The charges will only happen if @memcg matches the current cpu's memcg
1857 * stock, and at least @nr_pages are available in that stock. Failure to
1858 * service an allocation will refill the stock.
1860 * returns true if successful, false otherwise.
1862 static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1864 struct memcg_stock_pcp *stock;
1867 if (nr_pages > CHARGE_BATCH)
1870 stock = &get_cpu_var(memcg_stock);
1871 if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
1872 stock->nr_pages -= nr_pages;
1875 put_cpu_var(memcg_stock);
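/*
 * Illustrative note (not part of the original file): try_charge() below
 * charges CHARGE_BATCH (32) pages to the page_counters even when a single
 * page was requested and parks the surplus here via refill_stock(), so the
 * next 31 one-page charges from the same memcg on this cpu can be served
 * from the stock without touching the shared counters at all.
 */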
1880 * Returns stock cached in the percpu area and resets the cached information.
1882 static void drain_stock(struct memcg_stock_pcp *stock)
1884 struct mem_cgroup *old = stock->cached;
1886 if (stock->nr_pages) {
1887 page_counter_uncharge(&old->memory, stock->nr_pages);
1888 if (do_swap_account)
1889 page_counter_uncharge(&old->memsw, stock->nr_pages);
1890 css_put_many(&old->css, stock->nr_pages);
1891 stock->nr_pages = 0;
1893 stock->cached = NULL;
1897 * This must be called with preemption disabled or by a thread which is
1898 * pinned to the local cpu.
1900 static void drain_local_stock(struct work_struct *dummy)
1902 struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
1904 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1908 * Cache charges (val) in the local per-cpu area.
1909 * They will be consumed by consume_stock() later.
1911 static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
1913 struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);
1915 if (stock->cached != memcg) { /* reset if necessary */
1917 stock->cached = memcg;
1919 stock->nr_pages += nr_pages;
1920 put_cpu_var(memcg_stock);
1924 * Drain all per-CPU charge caches for the given root_memcg and the
1925 * subtree of the hierarchy under it.
1927 static void drain_all_stock(struct mem_cgroup *root_memcg)
1931 /* If someone's already draining, avoid running more workers. */
1932 if (!mutex_trylock(&percpu_charge_mutex))
1934 /* Notify other cpus that system-wide "drain" is running */
1937 for_each_online_cpu(cpu) {
1938 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
1939 struct mem_cgroup *memcg;
1941 memcg = stock->cached;
1942 if (!memcg || !stock->nr_pages)
1944 if (!mem_cgroup_is_descendant(memcg, root_memcg))
1946 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) {
1948 drain_local_stock(&stock->work);
1950 schedule_work_on(cpu, &stock->work);
1955 mutex_unlock(&percpu_charge_mutex);
1958 static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
1959 unsigned long action,
1962 int cpu = (unsigned long)hcpu;
1963 struct memcg_stock_pcp *stock;
1965 if (action == CPU_ONLINE)
1968 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1971 stock = &per_cpu(memcg_stock, cpu);
1977 * Scheduled by try_charge() to be executed from the userland return path
1978 * and reclaims memory over the high limit.
1980 void mem_cgroup_handle_over_high(void)
1982 unsigned int nr_pages = current->memcg_nr_pages_over_high;
1983 struct mem_cgroup *memcg, *pos;
1985 if (likely(!nr_pages))
1988 pos = memcg = get_mem_cgroup_from_mm(current->mm);
1991 if (page_counter_read(&pos->memory) <= pos->high)
1993 mem_cgroup_events(pos, MEMCG_HIGH, 1);
1994 try_to_free_mem_cgroup_pages(pos, nr_pages, GFP_KERNEL, true);
1995 } while ((pos = parent_mem_cgroup(pos)));
1997 css_put(&memcg->css);
1998 current->memcg_nr_pages_over_high = 0;
2001 static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
2002 unsigned int nr_pages)
2004 unsigned int batch = max(CHARGE_BATCH, nr_pages);
2005 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2006 struct mem_cgroup *mem_over_limit;
2007 struct page_counter *counter;
2008 unsigned long nr_reclaimed;
2009 bool may_swap = true;
2010 bool drained = false;
2012 if (mem_cgroup_is_root(memcg))
2015 if (consume_stock(memcg, nr_pages))
2018 if (!do_swap_account ||
2019 !page_counter_try_charge(&memcg->memsw, batch, &counter)) {
2020 if (!page_counter_try_charge(&memcg->memory, batch, &counter))
2022 if (do_swap_account)
2023 page_counter_uncharge(&memcg->memsw, batch);
2024 mem_over_limit = mem_cgroup_from_counter(counter, memory);
2026 mem_over_limit = mem_cgroup_from_counter(counter, memsw);
2030 if (batch > nr_pages) {
2036 * Unlike in global OOM situations, memcg is not in a physical
2037 * memory shortage. Allow dying and OOM-killed tasks to
2038 * bypass the last charges so that they can exit quickly and
2039 * free their memory.
2041 if (unlikely(test_thread_flag(TIF_MEMDIE) ||
2042 fatal_signal_pending(current) ||
2043 current->flags & PF_EXITING))
2046 if (unlikely(task_in_memcg_oom(current)))
2049 if (!(gfp_mask & __GFP_WAIT))
2052 mem_cgroup_events(mem_over_limit, MEMCG_MAX, 1);
2054 nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages,
2055 gfp_mask, may_swap);
2057 if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
2061 drain_all_stock(mem_over_limit);
2066 if (gfp_mask & __GFP_NORETRY)
2069 * Even though the limit is exceeded at this point, reclaim
2070 * may have been able to free some pages. Retry the charge
2071 * before killing the task.
2073 * Only for regular pages, though: huge pages are rather
2074 * unlikely to succeed so close to the limit, and we fall back
2075 * to regular pages anyway in case of failure.
2077 if (nr_reclaimed && nr_pages <= (1 << PAGE_ALLOC_COSTLY_ORDER))
2080 * At task move, charge accounts can be doubly counted. So, it's
2081 * better to wait until the end of task_move if something is going on.
2083 if (mem_cgroup_wait_acct_move(mem_over_limit))
2089 if (gfp_mask & __GFP_NOFAIL)
2092 if (fatal_signal_pending(current))
2095 mem_cgroup_events(mem_over_limit, MEMCG_OOM, 1);
2097 mem_cgroup_oom(mem_over_limit, gfp_mask,
2098 get_order(nr_pages * PAGE_SIZE));
2100 if (!(gfp_mask & __GFP_NOFAIL))
2104 * The allocation either can't fail or will lead to more memory
2105 * being freed very soon. Allow memory usage to go over the limit
2106 * temporarily by force charging it.
2108 page_counter_charge(&memcg->memory, nr_pages);
2109 if (do_swap_account)
2110 page_counter_charge(&memcg->memsw, nr_pages);
2111 css_get_many(&memcg->css, nr_pages);
2116 css_get_many(&memcg->css, batch);
2117 if (batch > nr_pages)
2118 refill_stock(memcg, batch - nr_pages);
2121 * If the hierarchy is above the normal consumption range, schedule
2122 * reclaim on returning to userland. We can perform reclaim here
2123 * if __GFP_WAIT but let's always punt for simplicity and so that
2124 * GFP_KERNEL can consistently be used during reclaim. @memcg is
2125 * not recorded as it most likely matches current's and won't
2126 * change in the meantime. As high limit is checked again before
2127 * reclaim, the cost of mismatch is negligible.
2130 if (page_counter_read(&memcg->memory) > memcg->high) {
2131 current->memcg_nr_pages_over_high += nr_pages;
2132 set_notify_resume(current);
2135 } while ((memcg = parent_mem_cgroup(memcg)));
2140 static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
2142 if (mem_cgroup_is_root(memcg))
2145 page_counter_uncharge(&memcg->memory, nr_pages);
2146 if (do_swap_account)
2147 page_counter_uncharge(&memcg->memsw, nr_pages);
2149 css_put_many(&memcg->css, nr_pages);
2152 static void lock_page_lru(struct page *page, int *isolated)
2154 struct zone *zone = page_zone(page);
2156 spin_lock_irq(&zone->lru_lock);
2157 if (PageLRU(page)) {
2158 struct lruvec *lruvec;
2160 lruvec = mem_cgroup_page_lruvec(page, zone);
2162 del_page_from_lru_list(page, lruvec, page_lru(page));
2168 static void unlock_page_lru(struct page *page, int isolated)
2170 struct zone *zone = page_zone(page);
2173 struct lruvec *lruvec;
2175 lruvec = mem_cgroup_page_lruvec(page, zone);
2176 VM_BUG_ON_PAGE(PageLRU(page), page);
2178 add_page_to_lru_list(page, lruvec, page_lru(page));
2180 spin_unlock_irq(&zone->lru_lock);
2183 static void commit_charge(struct page *page, struct mem_cgroup *memcg,
2188 VM_BUG_ON_PAGE(page->mem_cgroup, page);
2191 * In some cases, such as SwapCache and FUSE (splice_buf->radixtree), the page
2192 * may already be on some other mem_cgroup's LRU. Take care of it.
2195 lock_page_lru(page, &isolated);
2198 * Nobody should be changing or seriously looking at
2199 * page->mem_cgroup at this point:
2201 * - the page is uncharged
2203 * - the page is off-LRU
2205 * - an anonymous fault has exclusive page access, except for
2206 * a locked page table
2208 * - a page cache insertion, a swapin fault, or a migration
2209 * have the page locked
2211 page->mem_cgroup = memcg;
2214 unlock_page_lru(page, isolated);
2217 #ifdef CONFIG_MEMCG_KMEM
2218 int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp,
2219 unsigned long nr_pages)
2221 struct page_counter *counter;
2224 ret = page_counter_try_charge(&memcg->kmem, nr_pages, &counter);
2228 ret = try_charge(memcg, gfp, nr_pages);
2230 page_counter_uncharge(&memcg->kmem, nr_pages);
2235 void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages)
2237 page_counter_uncharge(&memcg->memory, nr_pages);
2238 if (do_swap_account)
2239 page_counter_uncharge(&memcg->memsw, nr_pages);
2241 page_counter_uncharge(&memcg->kmem, nr_pages);
2243 css_put_many(&memcg->css, nr_pages);
2246 static int memcg_alloc_cache_id(void)
2251 id = ida_simple_get(&memcg_cache_ida,
2252 0, MEMCG_CACHES_MAX_SIZE, GFP_KERNEL);
2256 if (id < memcg_nr_cache_ids)
2260 * There's no space for the new id in memcg_caches arrays,
2261 * so we have to grow them.
2263 down_write(&memcg_cache_ids_sem);
2265 size = 2 * (id + 1);
2266 if (size < MEMCG_CACHES_MIN_SIZE)
2267 size = MEMCG_CACHES_MIN_SIZE;
2268 else if (size > MEMCG_CACHES_MAX_SIZE)
2269 size = MEMCG_CACHES_MAX_SIZE;
2271 err = memcg_update_all_caches(size);
2273 err = memcg_update_all_list_lrus(size);
2275 memcg_nr_cache_ids = size;
2277 up_write(&memcg_cache_ids_sem);
2280 ida_simple_remove(&memcg_cache_ida, id);
2286 static void memcg_free_cache_id(int id)
2288 ida_simple_remove(&memcg_cache_ida, id);
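/*
 * Illustrative sketch, not part of the kernel build: the array-growth
 * policy used by memcg_alloc_cache_id() above, as a standalone helper.
 * The MODEL_* constants are local placeholders for
 * MEMCG_CACHES_MIN_SIZE/MEMCG_CACHES_MAX_SIZE; only the "double past the
 * new id, then clamp" step mirrors the code above.
 */
enum {
	MODEL_CACHES_MIN_SIZE = 4,
	MODEL_CACHES_MAX_SIZE = 65536,
};

static int model_new_caches_array_size(int id)
{
	int size = 2 * (id + 1);	/* at least large enough to index 'id' */

	if (size < MODEL_CACHES_MIN_SIZE)
		size = MODEL_CACHES_MIN_SIZE;
	else if (size > MODEL_CACHES_MAX_SIZE)
		size = MODEL_CACHES_MAX_SIZE;
	return size;
}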
2291 struct memcg_kmem_cache_create_work {
2292 struct mem_cgroup *memcg;
2293 struct kmem_cache *cachep;
2294 struct work_struct work;
2297 static void memcg_kmem_cache_create_func(struct work_struct *w)
2299 struct memcg_kmem_cache_create_work *cw =
2300 container_of(w, struct memcg_kmem_cache_create_work, work);
2301 struct mem_cgroup *memcg = cw->memcg;
2302 struct kmem_cache *cachep = cw->cachep;
2304 memcg_create_kmem_cache(memcg, cachep);
2306 css_put(&memcg->css);
2311 * Enqueue the creation of a per-memcg kmem_cache.
2313 static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2314 struct kmem_cache *cachep)
2316 struct memcg_kmem_cache_create_work *cw;
2318 cw = kmalloc(sizeof(*cw), GFP_NOWAIT);
2322 css_get(&memcg->css);
2325 cw->cachep = cachep;
2326 INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
2328 schedule_work(&cw->work);
2331 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
2332 struct kmem_cache *cachep)
2335 * We need to stop accounting when we kmalloc, because if the
2336 * corresponding kmalloc cache is not yet created, the first allocation
2337 * in __memcg_schedule_kmem_cache_create will recurse.
2339 * However, it is better to enclose the whole function. Depending on
2340 * the debugging options enabled, INIT_WORK(), for instance, can
2341 * trigger an allocation. This too, will make us recurse. Because at
2342 * this point we can't allow ourselves back into memcg_kmem_get_cache,
2343 * the safest choice is to do it like this, wrapping the whole function.
2345 current->memcg_kmem_skip_account = 1;
2346 __memcg_schedule_kmem_cache_create(memcg, cachep);
2347 current->memcg_kmem_skip_account = 0;
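/*
 * Illustrative sketch, not part of the kernel build: the
 * memcg_kmem_skip_account pattern above, modelled with a plain
 * thread-local flag and stdlib allocations.  model_kmalloc() and
 * model_schedule_create() are hypothetical; the point is that the flag
 * wraps the *whole* operation, so any allocation made while scheduling
 * the cache creation bypasses per-memcg accounting instead of recursing
 * back into the cache-lookup path.
 */
#include <stdlib.h>

static __thread int model_skip_account;

static void *model_kmalloc(size_t size)
{
	if (model_skip_account)
		return malloc(size);	/* plain, unaccounted allocation */
	/* ... normally: look up the per-memcg cache, charge it, allocate ... */
	return malloc(size);
}

static void model_schedule_create(void (*create_work)(void))
{
	model_skip_account = 1;	/* everything below must not recurse */
	create_work();
	model_skip_account = 0;
}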
2351 * Return the kmem_cache we're supposed to use for a slab allocation.
2352 * We try to use the current memcg's version of the cache.
2354 * If the cache does not exist yet and we are the first user of it,
2355 * we either create it immediately, if possible, or create it asynchronously in a workqueue.
2357 * In the latter case, we will let the current allocation go through with
2358 * the original cache.
2360 * Can't be called in interrupt context or from kernel threads.
2361 * This function needs to be called with rcu_read_lock() held.
2363 struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep)
2365 struct mem_cgroup *memcg;
2366 struct kmem_cache *memcg_cachep;
2369 VM_BUG_ON(!is_root_cache(cachep));
2371 if (current->memcg_kmem_skip_account)
2374 memcg = get_mem_cgroup_from_mm(current->mm);
2375 kmemcg_id = READ_ONCE(memcg->kmemcg_id);
2379 memcg_cachep = cache_from_memcg_idx(cachep, kmemcg_id);
2380 if (likely(memcg_cachep))
2381 return memcg_cachep;
2384 * If we are in a safe context (can wait, and not in interrupt
2385 * context), we could be predictable and return right away.
2386 * This would guarantee that the allocation being performed
2387 * already belongs in the new cache.
2389 * However, there are some clashes that can arise from locking.
2390 * For instance, because we acquire the slab_mutex while doing
2391 * memcg_create_kmem_cache, this means no further allocation
2392 * could happen with the slab_mutex held. So it's better to
2395 memcg_schedule_kmem_cache_create(memcg, cachep);
2397 css_put(&memcg->css);
2401 void __memcg_kmem_put_cache(struct kmem_cache *cachep)
2403 if (!is_root_cache(cachep))
2404 css_put(&cachep->memcg_params.memcg->css);
2408 * We need to verify if the allocation against current->mm->owner's memcg is
2409 * possible for the given order. But the page is not allocated yet, so we'll
2410 * need a further commit step to do the final arrangements.
2412 * It is possible for the task to switch cgroups in the meantime, so at
2413 * commit time, we can't rely on task conversion any longer. We'll then use
2414 * the handle argument to return to the caller which cgroup we should commit
2415 * against. We could also return the memcg directly and avoid the pointer
2416 * passing, but a boolean return value gives better semantics considering
2417 * the compiled-out case as well.
2419 * Returning true means the allocation is possible.
2422 __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
2424 struct mem_cgroup *memcg;
2429 memcg = get_mem_cgroup_from_mm(current->mm);
2431 if (!memcg_kmem_is_active(memcg)) {
2432 css_put(&memcg->css);
2436 ret = memcg_charge_kmem(memcg, gfp, 1 << order);
2440 css_put(&memcg->css);
2444 void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg,
2447 VM_BUG_ON(mem_cgroup_is_root(memcg));
2449 /* The page allocation failed. Revert */
2451 memcg_uncharge_kmem(memcg, 1 << order);
2454 page->mem_cgroup = memcg;
2457 void __memcg_kmem_uncharge_pages(struct page *page, int order)
2459 struct mem_cgroup *memcg = page->mem_cgroup;
2464 VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
2466 memcg_uncharge_kmem(memcg, 1 << order);
2467 page->mem_cgroup = NULL;
2470 struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr)
2472 struct mem_cgroup *memcg = NULL;
2473 struct kmem_cache *cachep;
2476 page = virt_to_head_page(ptr);
2477 if (PageSlab(page)) {
2478 cachep = page->slab_cache;
2479 if (!is_root_cache(cachep))
2480 memcg = cachep->memcg_params.memcg;
2482 /* page allocated by alloc_kmem_pages */
2483 memcg = page->mem_cgroup;
2487 #endif /* CONFIG_MEMCG_KMEM */
2489 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2492 * Because tail pages are not marked as "used", set it. We're under
2493 * zone->lru_lock, 'splitting on pmd' and compound_lock.
2494 * charge/uncharge will never happen, and move_account() is done under
2495 * compound_lock(), so we don't have to take care of races.
2497 void mem_cgroup_split_huge_fixup(struct page *head)
2501 if (mem_cgroup_disabled())
2504 for (i = 1; i < HPAGE_PMD_NR; i++)
2505 head[i].mem_cgroup = head->mem_cgroup;
2507 __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE],
2510 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2512 #ifdef CONFIG_MEMCG_SWAP
2513 static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg,
2516 int val = (charge) ? 1 : -1;
2517 this_cpu_add(memcg->stat->count[MEM_CGROUP_STAT_SWAP], val);
2521 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
2522 * @entry: swap entry to be moved
2523 * @from: mem_cgroup which the entry is moved from
2524 * @to: mem_cgroup which the entry is moved to
2526 * It succeeds only when the swap_cgroup's record for this entry is the same
2527 * as the mem_cgroup's id of @from.
2529 * Returns 0 on success, -EINVAL on failure.
2531 * The caller must have charged to @to, IOW, called page_counter_charge() for
2532 * both memory and memsw, and called css_get().
2534 static int mem_cgroup_move_swap_account(swp_entry_t entry,
2535 struct mem_cgroup *from, struct mem_cgroup *to)
2537 unsigned short old_id, new_id;
2539 old_id = mem_cgroup_id(from);
2540 new_id = mem_cgroup_id(to);
2542 if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
2543 mem_cgroup_swap_statistics(from, false);
2544 mem_cgroup_swap_statistics(to, true);
2550 static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
2551 struct mem_cgroup *from, struct mem_cgroup *to)
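/*
 * Illustrative sketch, not part of the kernel build: the ownership-check
 * protocol of mem_cgroup_move_swap_account() above, modelled on a plain
 * array of per-entry owner ids.  model_swap_records[] and
 * MODEL_NR_SWAP_ENTRIES are hypothetical; the real code goes through
 * swap_cgroup_cmpxchg(), which performs the compare-and-exchange
 * atomically, whereas this single-threaded model only shows the
 * "move succeeds only if @entry is still owned by @old_id" rule.
 */
#define MODEL_NR_SWAP_ENTRIES 1024

static unsigned short model_swap_records[MODEL_NR_SWAP_ENTRIES];

static int model_move_swap_record(unsigned long entry,
				  unsigned short old_id, unsigned short new_id)
{
	if (entry >= MODEL_NR_SWAP_ENTRIES)
		return -1;
	if (model_swap_records[entry] != old_id)
		return -1;	/* stands in for -EINVAL: record changed owner */
	model_swap_records[entry] = new_id;
	return 0;
}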
2557 static DEFINE_MUTEX(memcg_limit_mutex);
2559 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
2560 unsigned long limit)
2562 unsigned long curusage;
2563 unsigned long oldusage;
2564 bool enlarge = false;
2569 * For keeping hierarchical_reclaim simple, how long we should retry
2570 * depends on the caller. We set our retry count to be a function
2571 * of the number of children which we should visit in this loop.
2573 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2574 mem_cgroup_count_children(memcg);
2576 oldusage = page_counter_read(&memcg->memory);
2579 if (signal_pending(current)) {
2584 mutex_lock(&memcg_limit_mutex);
2585 if (limit > memcg->memsw.limit) {
2586 mutex_unlock(&memcg_limit_mutex);
2590 if (limit > memcg->memory.limit)
2592 ret = page_counter_limit(&memcg->memory, limit);
2593 mutex_unlock(&memcg_limit_mutex);
2598 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
2600 curusage = page_counter_read(&memcg->memory);
2601 /* Usage is reduced ? */
2602 if (curusage >= oldusage)
2605 oldusage = curusage;
2606 } while (retry_count);
2608 if (!ret && enlarge)
2609 memcg_oom_recover(memcg);
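/*
 * Illustrative sketch, not part of the kernel build: the retry loop of
 * mem_cgroup_resize_limit() above as a userspace model.  struct
 * model_counter, model_set_limit() and the reclaim callback are
 * hypothetical stand-ins for the page_counter, page_counter_limit() and
 * try_to_free_mem_cgroup_pages(); as above, a retry is only consumed
 * when reclaim makes no forward progress.
 */
struct model_counter {
	unsigned long usage;
	unsigned long limit;
};

static int model_set_limit(struct model_counter *c, unsigned long limit)
{
	if (c->usage > limit)
		return -1;	/* usage still above the requested limit */
	c->limit = limit;
	return 0;
}

static int model_resize_limit(struct model_counter *c, unsigned long limit,
			      int retries,
			      void (*reclaim)(struct model_counter *))
{
	unsigned long old_usage = c->usage;

	while (retries) {
		if (!model_set_limit(c, limit))
			return 0;
		reclaim(c);		/* shrink usage, then try the limit again */
		if (c->usage >= old_usage)
			retries--;	/* no progress: burn a retry */
		old_usage = c->usage;
	}
	return -1;			/* stands in for -EBUSY */
}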
2614 static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
2615 unsigned long limit)
2617 unsigned long curusage;
2618 unsigned long oldusage;
2619 bool enlarge = false;
2623 /* see mem_cgroup_resize_limit */
2624 retry_count = MEM_CGROUP_RECLAIM_RETRIES *
2625 mem_cgroup_count_children(memcg);
2627 oldusage = page_counter_read(&memcg->memsw);
2630 if (signal_pending(current)) {
2635 mutex_lock(&memcg_limit_mutex);
2636 if (limit < memcg->memory.limit) {
2637 mutex_unlock(&memcg_limit_mutex);
2641 if (limit > memcg->memsw.limit)
2643 ret = page_counter_limit(&memcg->memsw, limit);
2644 mutex_unlock(&memcg_limit_mutex);
2649 try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
2651 curusage = page_counter_read(&memcg->memsw);
2652 /* Usage is reduced ? */
2653 if (curusage >= oldusage)
2656 oldusage = curusage;
2657 } while (retry_count);
2659 if (!ret && enlarge)
2660 memcg_oom_recover(memcg);
2665 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
2667 unsigned long *total_scanned)
2669 unsigned long nr_reclaimed = 0;
2670 struct mem_cgroup_per_zone *mz, *next_mz = NULL;
2671 unsigned long reclaimed;
2673 struct mem_cgroup_tree_per_zone *mctz;
2674 unsigned long excess;
2675 unsigned long nr_scanned;
2680 mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
2682 * This loop can run for a while, especially if mem_cgroups continuously
2683 * keep exceeding their soft limit and putting the system under
2690 mz = mem_cgroup_largest_soft_limit_node(mctz);
2695 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, zone,
2696 gfp_mask, &nr_scanned);
2697 nr_reclaimed += reclaimed;
2698 *total_scanned += nr_scanned;
2699 spin_lock_irq(&mctz->lock);
2700 __mem_cgroup_remove_exceeded(mz, mctz);
2703 * If we failed to reclaim anything from this memory cgroup
2704 * it is time to move on to the next cgroup
2708 next_mz = __mem_cgroup_largest_soft_limit_node(mctz);
2710 excess = soft_limit_excess(mz->memcg);
2712 * One school of thought says that we should not add
2713 * back the node to the tree if reclaim returns 0.
2714 * But our reclaim could return 0 simply because, due
2715 * to priority, we are exposing a smaller subset of
2716 * memory to reclaim from. Consider this as a longer
2719 /* If excess == 0, no tree ops */
2720 __mem_cgroup_insert_exceeded(mz, mctz, excess);
2721 spin_unlock_irq(&mctz->lock);
2722 css_put(&mz->memcg->css);
2725 * Could not reclaim anything and there are no more
2726 * mem cgroups to try or we seem to be looping without
2727 * reclaiming anything.
2729 if (!nr_reclaimed &&
2731 loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
2733 } while (!nr_reclaimed);
2735 css_put(&next_mz->memcg->css);
2736 return nr_reclaimed;
2740 * Test whether @memcg has children, dead or alive. Note that this
2741 * function doesn't care whether @memcg has use_hierarchy enabled and
2742 * returns %true if there are child csses according to the cgroup
2743 * hierarchy. Testing use_hierarchy is the caller's responsibility.
2745 static inline bool memcg_has_children(struct mem_cgroup *memcg)
2750 * The lock does not prevent addition or deletion of children, but
2751 * it prevents a new child from being initialized based on this
2752 * parent in css_online(), so it's enough to decide whether
2753 * hierarchically inherited attributes can still be changed or not.
2755 lockdep_assert_held(&memcg_create_mutex);
2758 ret = css_next_child(NULL, &memcg->css);
2764 * Reclaims as many pages from the given memcg as possible and moves
2765 * the rest to the parent.
2767 * Caller is responsible for holding css reference for memcg.
2769 static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
2771 int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
2773 /* we call try-to-free pages to make this cgroup empty */
2774 lru_add_drain_all();
2775 /* try to free all pages in this cgroup */
2776 while (nr_retries && page_counter_read(&memcg->memory)) {
2779 if (signal_pending(current))
2782 progress = try_to_free_mem_cgroup_pages(memcg, 1,
2786 /* maybe some writeback is necessary */
2787 congestion_wait(BLK_RW_ASYNC, HZ/10);
2795 static ssize_t mem_cgroup_force_empty_write(struct kernfs_open_file *of,
2796 char *buf, size_t nbytes,
2799 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
2801 if (mem_cgroup_is_root(memcg))
2803 return mem_cgroup_force_empty(memcg) ?: nbytes;
2806 static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
2809 return mem_cgroup_from_css(css)->use_hierarchy;
2812 static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
2813 struct cftype *cft, u64 val)
2816 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2817 struct mem_cgroup *parent_memcg = mem_cgroup_from_css(memcg->css.parent);
2819 mutex_lock(&memcg_create_mutex);
2821 if (memcg->use_hierarchy == val)
2825 * If parent's use_hierarchy is set, we can't make any modifications
2826 * in the child subtrees. If it is unset, then the change can
2827 * occur, provided the current cgroup has no children.
2829 * For the root cgroup, parent_mem is NULL, we allow value to be
2830 * set if there are no children.
2832 if ((!parent_memcg || !parent_memcg->use_hierarchy) &&
2833 (val == 1 || val == 0)) {
2834 if (!memcg_has_children(memcg))
2835 memcg->use_hierarchy = val;
2842 mutex_unlock(&memcg_create_mutex);
2847 static unsigned long tree_stat(struct mem_cgroup *memcg,
2848 enum mem_cgroup_stat_index idx)
2850 struct mem_cgroup *iter;
2851 unsigned long val = 0;
2853 for_each_mem_cgroup_tree(iter, memcg)
2854 val += mem_cgroup_read_stat(iter, idx);
2859 static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
2863 if (mem_cgroup_is_root(memcg)) {
2864 val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE);
2865 val += tree_stat(memcg, MEM_CGROUP_STAT_RSS);
2867 val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP);
2870 val = page_counter_read(&memcg->memory);
2872 val = page_counter_read(&memcg->memsw);
2874 return val << PAGE_SHIFT;
2885 static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css,
2888 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
2889 struct page_counter *counter;
2891 switch (MEMFILE_TYPE(cft->private)) {
2893 counter = &memcg->memory;
2896 counter = &memcg->memsw;
2899 counter = &memcg->kmem;
2905 switch (MEMFILE_ATTR(cft->private)) {
2907 if (counter == &memcg->memory)
2908 return mem_cgroup_usage(memcg, false);
2909 if (counter == &memcg->memsw)
2910 return mem_cgroup_usage(memcg, true);
2911 return (u64)page_counter_read(counter) * PAGE_SIZE;
2913 return (u64)counter->limit * PAGE_SIZE;
2915 return (u64)counter->watermark * PAGE_SIZE;
2917 return counter->failcnt;
2918 case RES_SOFT_LIMIT:
2919 return (u64)memcg->soft_limit * PAGE_SIZE;
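/*
 * Illustrative sketch, not part of the kernel build: one plausible
 * encoding behind the MEMFILE_PRIVATE()/MEMFILE_TYPE()/MEMFILE_ATTR()
 * macros used by mem_cgroup_read_u64() above (the real definitions live
 * earlier in this file).  The point is only that a (counter type,
 * attribute) pair is packed into the single cft->private word when the
 * cftype table is defined and unpacked again in the shared handler.
 */
#define MODEL_MEMFILE_PRIVATE(type, attr)	(((type) << 16) | (attr))
#define MODEL_MEMFILE_TYPE(val)			(((val) >> 16) & 0xffff)
#define MODEL_MEMFILE_ATTR(val)			((val) & 0xffff)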
2925 #ifdef CONFIG_MEMCG_KMEM
2926 static int memcg_activate_kmem(struct mem_cgroup *memcg,
2927 unsigned long nr_pages)
2932 BUG_ON(memcg->kmemcg_id >= 0);
2933 BUG_ON(memcg->kmem_acct_activated);
2934 BUG_ON(memcg->kmem_acct_active);
2937 * For simplicity, we won't allow this to be disabled. It also can't
2938 * be changed if the cgroup has children already, or if tasks had already joined.
2941 * If tasks join before we set the limit, a person looking at
2942 * kmem.usage_in_bytes will have no way to determine when it took
2943 * place, which makes the value quite meaningless.
2945 * After it first became limited, changes in the value of the limit are
2946 * of course permitted.
2948 mutex_lock(&memcg_create_mutex);
2949 if (cgroup_has_tasks(memcg->css.cgroup) ||
2950 (memcg->use_hierarchy && memcg_has_children(memcg)))
2952 mutex_unlock(&memcg_create_mutex);
2956 memcg_id = memcg_alloc_cache_id();
2963 * We couldn't have accounted to this cgroup, because it hasn't been
2964 * activated yet, so this should succeed.
2966 err = page_counter_limit(&memcg->kmem, nr_pages);
2969 static_key_slow_inc(&memcg_kmem_enabled_key);
2971 * A memory cgroup is considered kmem-active as soon as it gets
2972 * kmemcg_id. Setting the id after enabling static branching will
2973 * guarantee no one starts accounting before all call sites are patched.
2976 memcg->kmemcg_id = memcg_id;
2977 memcg->kmem_acct_activated = true;
2978 memcg->kmem_acct_active = true;
2983 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
2984 unsigned long limit)
2988 mutex_lock(&memcg_limit_mutex);
2989 if (!memcg_kmem_is_active(memcg))
2990 ret = memcg_activate_kmem(memcg, limit);
2992 ret = page_counter_limit(&memcg->kmem, limit);
2993 mutex_unlock(&memcg_limit_mutex);
2997 static int memcg_propagate_kmem(struct mem_cgroup *memcg)
3000 struct mem_cgroup *parent = parent_mem_cgroup(memcg);
3005 mutex_lock(&memcg_limit_mutex);
3007 * If the parent cgroup is not kmem-active now, it cannot be activated
3008 * after this point, because it has at least one child already.
3010 if (memcg_kmem_is_active(parent))
3011 ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX);
3012 mutex_unlock(&memcg_limit_mutex);
3016 static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
3017 unsigned long limit)
3021 #endif /* CONFIG_MEMCG_KMEM */
3024 * The user of this function is...
3027 static ssize_t mem_cgroup_write(struct kernfs_open_file *of,
3028 char *buf, size_t nbytes, loff_t off)
3030 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3031 unsigned long nr_pages;
3034 buf = strstrip(buf);
3035 ret = page_counter_memparse(buf, "-1", &nr_pages);
3039 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3041 if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
3045 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3047 ret = mem_cgroup_resize_limit(memcg, nr_pages);
3050 ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages);
3053 ret = memcg_update_kmem_limit(memcg, nr_pages);
3057 case RES_SOFT_LIMIT:
3058 memcg->soft_limit = nr_pages;
3062 return ret ?: nbytes;
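/*
 * Usage sketch, not part of the kernel build: from userspace, the write
 * handler above is reached by writing a human-readable size to one of
 * the limit files; page_counter_memparse() accepts suffixes such as "M"
 * or "G".  The cgroup v1 mount point and group name below are
 * assumptions; adjust to the local hierarchy, e.g.:
 *
 *	model_write_memcg_file("/sys/fs/cgroup/memory/demo",
 *			       "memory.limit_in_bytes", "512M");
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int model_write_memcg_file(const char *cgdir, const char *file,
				  const char *val)
{
	char path[256];
	int fd, ret = 0;

	snprintf(path, sizeof(path), "%s/%s", cgdir, file);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0)
		ret = -1;	/* e.g. the resize above gave up with -EBUSY */
	close(fd);
	return ret;
}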
3065 static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf,
3066 size_t nbytes, loff_t off)
3068 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
3069 struct page_counter *counter;
3071 switch (MEMFILE_TYPE(of_cft(of)->private)) {
3073 counter = &memcg->memory;
3076 counter = &memcg->memsw;
3079 counter = &memcg->kmem;
3085 switch (MEMFILE_ATTR(of_cft(of)->private)) {
3087 page_counter_reset_watermark(counter);
3090 counter->failcnt = 0;
3099 static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
3102 return mem_cgroup_from_css(css)->move_charge_at_immigrate;
3106 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3107 struct cftype *cft, u64 val)
3109 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3111 if (val & ~MOVE_MASK)
3115 * No kind of locking is needed in here, because ->can_attach() will
3116 * check this value once at the beginning of the process, and then carry
3117 * on with stale data. This means that changes to this value will only
3118 * affect task migrations starting after the change.
3120 memcg->move_charge_at_immigrate = val;
3124 static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
3125 struct cftype *cft, u64 val)
3132 static int memcg_numa_stat_show(struct seq_file *m, void *v)
3136 unsigned int lru_mask;
3139 static const struct numa_stat stats[] = {
3140 { "total", LRU_ALL },
3141 { "file", LRU_ALL_FILE },
3142 { "anon", LRU_ALL_ANON },
3143 { "unevictable", BIT(LRU_UNEVICTABLE) },
3145 const struct numa_stat *stat;
3148 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3150 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3151 nr = mem_cgroup_nr_lru_pages(memcg, stat->lru_mask);
3152 seq_printf(m, "%s=%lu", stat->name, nr);
3153 for_each_node_state(nid, N_MEMORY) {
3154 nr = mem_cgroup_node_nr_lru_pages(memcg, nid,
3156 seq_printf(m, " N%d=%lu", nid, nr);
3161 for (stat = stats; stat < stats + ARRAY_SIZE(stats); stat++) {
3162 struct mem_cgroup *iter;
3165 for_each_mem_cgroup_tree(iter, memcg)
3166 nr += mem_cgroup_nr_lru_pages(iter, stat->lru_mask);
3167 seq_printf(m, "hierarchical_%s=%lu", stat->name, nr);
3168 for_each_node_state(nid, N_MEMORY) {
3170 for_each_mem_cgroup_tree(iter, memcg)
3171 nr += mem_cgroup_node_nr_lru_pages(
3172 iter, nid, stat->lru_mask);
3173 seq_printf(m, " N%d=%lu", nid, nr);
3180 #endif /* CONFIG_NUMA */
3182 static int memcg_stat_show(struct seq_file *m, void *v)
3184 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
3185 unsigned long memory, memsw;
3186 struct mem_cgroup *mi;
3189 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_stat_names) !=
3190 MEM_CGROUP_STAT_NSTATS);
3191 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_events_names) !=
3192 MEM_CGROUP_EVENTS_NSTATS);
3193 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
3195 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3196 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
3198 seq_printf(m, "%s %lu\n", mem_cgroup_stat_names[i],
3199 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
3202 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
3203 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
3204 mem_cgroup_read_events(memcg, i));
3206 for (i = 0; i < NR_LRU_LISTS; i++)
3207 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
3208 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
3210 /* Hierarchical information */
3211 memory = memsw = PAGE_COUNTER_MAX;
3212 for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) {
3213 memory = min(memory, mi->memory.limit);
3214 memsw = min(memsw, mi->memsw.limit);
3216 seq_printf(m, "hierarchical_memory_limit %llu\n",
3217 (u64)memory * PAGE_SIZE);
3218 if (do_swap_account)
3219 seq_printf(m, "hierarchical_memsw_limit %llu\n",
3220 (u64)memsw * PAGE_SIZE);
3222 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
3223 unsigned long long val = 0;
3225 if (i == MEM_CGROUP_STAT_SWAP && !do_swap_account)
3227 for_each_mem_cgroup_tree(mi, memcg)
3228 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
3229 seq_printf(m, "total_%s %llu\n", mem_cgroup_stat_names[i], val);
3232 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
3233 unsigned long long val = 0;
3235 for_each_mem_cgroup_tree(mi, memcg)
3236 val += mem_cgroup_read_events(mi, i);
3237 seq_printf(m, "total_%s %llu\n",
3238 mem_cgroup_events_names[i], val);
3241 for (i = 0; i < NR_LRU_LISTS; i++) {
3242 unsigned long long val = 0;
3244 for_each_mem_cgroup_tree(mi, memcg)
3245 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
3246 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
3249 #ifdef CONFIG_DEBUG_VM
3252 struct mem_cgroup_per_zone *mz;
3253 struct zone_reclaim_stat *rstat;
3254 unsigned long recent_rotated[2] = {0, 0};
3255 unsigned long recent_scanned[2] = {0, 0};
3257 for_each_online_node(nid)
3258 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
3259 mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
3260 rstat = &mz->lruvec.reclaim_stat;
3262 recent_rotated[0] += rstat->recent_rotated[0];
3263 recent_rotated[1] += rstat->recent_rotated[1];
3264 recent_scanned[0] += rstat->recent_scanned[0];
3265 recent_scanned[1] += rstat->recent_scanned[1];
3267 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
3268 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
3269 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
3270 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
3277 static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
3280 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3282 return mem_cgroup_swappiness(memcg);
3285 static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
3286 struct cftype *cft, u64 val)
3288 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3294 memcg->swappiness = val;
3296 vm_swappiness = val;
3301 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
3303 struct mem_cgroup_threshold_ary *t;
3304 unsigned long usage;
3309 t = rcu_dereference(memcg->thresholds.primary);
3311 t = rcu_dereference(memcg->memsw_thresholds.primary);
3316 usage = mem_cgroup_usage(memcg, swap);
3319 * current_threshold points to threshold just below or equal to usage.
3320 * If that's not true, a threshold was crossed after the last
3321 * call of __mem_cgroup_threshold().
3323 i = t->current_threshold;
3326 * Iterate backward over array of thresholds starting from
3327 * current_threshold and check if a threshold is crossed.
3328 * If none of the thresholds below usage is crossed, we read
3329 * only one element of the array here.
3331 for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
3332 eventfd_signal(t->entries[i].eventfd, 1);
3334 /* i = current_threshold + 1 */
3338 * Iterate forward over array of thresholds starting from
3339 * current_threshold+1 and check if a threshold is crossed.
3340 * If none of the thresholds above usage is crossed, we read
3341 * only one element of the array here.
3343 for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
3344 eventfd_signal(t->entries[i].eventfd, 1);
3346 /* Update current_threshold */
3347 t->current_threshold = i - 1;
3352 static void mem_cgroup_threshold(struct mem_cgroup *memcg)
3355 __mem_cgroup_threshold(memcg, false);
3356 if (do_swap_account)
3357 __mem_cgroup_threshold(memcg, true);
3359 memcg = parent_mem_cgroup(memcg);
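/*
 * Illustrative sketch, not part of the kernel build: the two-direction
 * scan performed by __mem_cgroup_threshold() above, on a plain sorted
 * array.  struct model_threshold and model_notify() are hypothetical
 * stand-ins for mem_cgroup_threshold and eventfd_signal(); every
 * threshold found between the cached current_threshold index and the
 * new usage is signalled, and the index of the threshold just below or
 * equal to usage is returned as the new cached index.
 */
struct model_threshold {
	unsigned long threshold;
	int id;
};

static void model_notify(int id)
{
	(void)id;	/* eventfd_signal() stand-in */
}

static int model_check_thresholds(const struct model_threshold *t, int size,
				  int current_threshold, unsigned long usage)
{
	int i = current_threshold;

	/* Walk left: thresholds usage has fallen back below since last time. */
	for (; i >= 0 && t[i].threshold > usage; i--)
		model_notify(t[i].id);
	i++;
	/* Walk right: thresholds usage has newly crossed upwards. */
	for (; i < size && t[i].threshold <= usage; i++)
		model_notify(t[i].id);
	return i - 1;
}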
3363 static int compare_thresholds(const void *a, const void *b)
3365 const struct mem_cgroup_threshold *_a = a;
3366 const struct mem_cgroup_threshold *_b = b;
3368 if (_a->threshold > _b->threshold)
3371 if (_a->threshold < _b->threshold)
3377 static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
3379 struct mem_cgroup_eventfd_list *ev;
3381 spin_lock(&memcg_oom_lock);
3383 list_for_each_entry(ev, &memcg->oom_notify, list)
3384 eventfd_signal(ev->eventfd, 1);
3386 spin_unlock(&memcg_oom_lock);
3390 static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
3392 struct mem_cgroup *iter;
3394 for_each_mem_cgroup_tree(iter, memcg)
3395 mem_cgroup_oom_notify_cb(iter);
3398 static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3399 struct eventfd_ctx *eventfd, const char *args, enum res_type type)
3401 struct mem_cgroup_thresholds *thresholds;
3402 struct mem_cgroup_threshold_ary *new;
3403 unsigned long threshold;
3404 unsigned long usage;
3407 ret = page_counter_memparse(args, "-1", &threshold);
3410 threshold <<= PAGE_SHIFT;
3412 mutex_lock(&memcg->thresholds_lock);
3415 thresholds = &memcg->thresholds;
3416 usage = mem_cgroup_usage(memcg, false);
3417 } else if (type == _MEMSWAP) {
3418 thresholds = &memcg->memsw_thresholds;
3419 usage = mem_cgroup_usage(memcg, true);
3423 /* Check if a threshold crossed before adding a new one */
3424 if (thresholds->primary)
3425 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3427 size = thresholds->primary ? thresholds->primary->size + 1 : 1;
3429 /* Allocate memory for new array of thresholds */
3430 new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
3438 /* Copy thresholds (if any) to new array */
3439 if (thresholds->primary) {
3440 memcpy(new->entries, thresholds->primary->entries, (size - 1) *
3441 sizeof(struct mem_cgroup_threshold));
3444 /* Add new threshold */
3445 new->entries[size - 1].eventfd = eventfd;
3446 new->entries[size - 1].threshold = threshold;
3448 /* Sort thresholds. Registering of new threshold isn't time-critical */
3449 sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
3450 compare_thresholds, NULL);
3452 /* Find current threshold */
3453 new->current_threshold = -1;
3454 for (i = 0; i < size; i++) {
3455 if (new->entries[i].threshold <= usage) {
3457 * new->current_threshold will not be used until
3458 * rcu_assign_pointer(), so it's safe to increment
3461 ++new->current_threshold;
3466 /* Free old spare buffer and save old primary buffer as spare */
3467 kfree(thresholds->spare);
3468 thresholds->spare = thresholds->primary;
3470 rcu_assign_pointer(thresholds->primary, new);
3472 /* To be sure that nobody uses thresholds */
3476 mutex_unlock(&memcg->thresholds_lock);
3481 static int mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
3482 struct eventfd_ctx *eventfd, const char *args)
3484 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEM);
3487 static int memsw_cgroup_usage_register_event(struct mem_cgroup *memcg,
3488 struct eventfd_ctx *eventfd, const char *args)
3490 return __mem_cgroup_usage_register_event(memcg, eventfd, args, _MEMSWAP);
3493 static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3494 struct eventfd_ctx *eventfd, enum res_type type)
3496 struct mem_cgroup_thresholds *thresholds;
3497 struct mem_cgroup_threshold_ary *new;
3498 unsigned long usage;
3501 mutex_lock(&memcg->thresholds_lock);
3504 thresholds = &memcg->thresholds;
3505 usage = mem_cgroup_usage(memcg, false);
3506 } else if (type == _MEMSWAP) {
3507 thresholds = &memcg->memsw_thresholds;
3508 usage = mem_cgroup_usage(memcg, true);
3512 if (!thresholds->primary)
3515 /* Check if a threshold crossed before removing */
3516 __mem_cgroup_threshold(memcg, type == _MEMSWAP);
3518 /* Calculate the new number of thresholds */
3520 for (i = 0; i < thresholds->primary->size; i++) {
3521 if (thresholds->primary->entries[i].eventfd != eventfd)
3525 new = thresholds->spare;
3527 /* Set thresholds array to NULL if we don't have thresholds */
3536 /* Copy thresholds and find current threshold */
3537 new->current_threshold = -1;
3538 for (i = 0, j = 0; i < thresholds->primary->size; i++) {
3539 if (thresholds->primary->entries[i].eventfd == eventfd)
3542 new->entries[j] = thresholds->primary->entries[i];
3543 if (new->entries[j].threshold <= usage) {
3545 * new->current_threshold will not be used
3546 * until rcu_assign_pointer(), so it's safe to increment
3549 ++new->current_threshold;
3555 /* Swap primary and spare array */
3556 thresholds->spare = thresholds->primary;
3557 /* If all events are unregistered, free the spare array */
3559 kfree(thresholds->spare);
3560 thresholds->spare = NULL;
3563 rcu_assign_pointer(thresholds->primary, new);
3565 /* To be sure that nobody uses thresholds */
3568 mutex_unlock(&memcg->thresholds_lock);
3571 static void mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3572 struct eventfd_ctx *eventfd)
3574 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEM);
3577 static void memsw_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
3578 struct eventfd_ctx *eventfd)
3580 return __mem_cgroup_usage_unregister_event(memcg, eventfd, _MEMSWAP);
3583 static int mem_cgroup_oom_register_event(struct mem_cgroup *memcg,
3584 struct eventfd_ctx *eventfd, const char *args)
3586 struct mem_cgroup_eventfd_list *event;
3588 event = kmalloc(sizeof(*event), GFP_KERNEL);
3592 spin_lock(&memcg_oom_lock);
3594 event->eventfd = eventfd;
3595 list_add(&event->list, &memcg->oom_notify);
3597 /* already in OOM ? */
3598 if (memcg->under_oom)
3599 eventfd_signal(eventfd, 1);
3600 spin_unlock(&memcg_oom_lock);
3605 static void mem_cgroup_oom_unregister_event(struct mem_cgroup *memcg,
3606 struct eventfd_ctx *eventfd)
3608 struct mem_cgroup_eventfd_list *ev, *tmp;
3610 spin_lock(&memcg_oom_lock);
3612 list_for_each_entry_safe(ev, tmp, &memcg->oom_notify, list) {
3613 if (ev->eventfd == eventfd) {
3614 list_del(&ev->list);
3619 spin_unlock(&memcg_oom_lock);
3622 static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
3624 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(sf));
3626 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
3627 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
3631 static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
3632 struct cftype *cft, u64 val)
3634 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3636 /* cannot set to root cgroup and only 0 and 1 are allowed */
3637 if (!css->parent || !((val == 0) || (val == 1)))
3640 memcg->oom_kill_disable = val;
3642 memcg_oom_recover(memcg);
3647 #ifdef CONFIG_MEMCG_KMEM
3648 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
3652 ret = memcg_propagate_kmem(memcg);
3656 return mem_cgroup_sockets_init(memcg, ss);
3659 static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
3661 struct cgroup_subsys_state *css;
3662 struct mem_cgroup *parent, *child;
3665 if (!memcg->kmem_acct_active)
3669 * Clear the 'active' flag before clearing memcg_caches arrays entries.
3670 * Since we take the slab_mutex in memcg_deactivate_kmem_caches(), it
3671 * guarantees no cache will be created for this cgroup after we are
3672 * done (see memcg_create_kmem_cache()).
3674 memcg->kmem_acct_active = false;
3676 memcg_deactivate_kmem_caches(memcg);
3678 kmemcg_id = memcg->kmemcg_id;
3679 BUG_ON(kmemcg_id < 0);
3681 parent = parent_mem_cgroup(memcg);
3683 parent = root_mem_cgroup;
3686 * Change kmemcg_id of this cgroup and all its descendants to the
3687 * parent's id, and then move all entries from this cgroup's list_lrus
3688 * to ones of the parent. After we have finished, all list_lrus
3689 * corresponding to this cgroup are guaranteed to remain empty. The
3690 * ordering is imposed by list_lru_node->lock taken by
3691 * memcg_drain_all_list_lrus().
3693 css_for_each_descendant_pre(css, &memcg->css) {
3694 child = mem_cgroup_from_css(css);
3695 BUG_ON(child->kmemcg_id != kmemcg_id);
3696 child->kmemcg_id = parent->kmemcg_id;
3697 if (!memcg->use_hierarchy)
3700 memcg_drain_all_list_lrus(kmemcg_id, parent->kmemcg_id);
3702 memcg_free_cache_id(kmemcg_id);
3705 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
3707 if (memcg->kmem_acct_activated) {
3708 memcg_destroy_kmem_caches(memcg);
3709 static_key_slow_dec(&memcg_kmem_enabled_key);
3710 WARN_ON(page_counter_read(&memcg->kmem));
3712 mem_cgroup_sockets_destroy(memcg);
3715 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
3720 static void memcg_deactivate_kmem(struct mem_cgroup *memcg)
3724 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
3729 #ifdef CONFIG_CGROUP_WRITEBACK
3731 struct list_head *mem_cgroup_cgwb_list(struct mem_cgroup *memcg)
3733 return &memcg->cgwb_list;
3736 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3738 return wb_domain_init(&memcg->cgwb_domain, gfp);
3741 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3743 wb_domain_exit(&memcg->cgwb_domain);
3746 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3748 wb_domain_size_changed(&memcg->cgwb_domain);
3751 struct wb_domain *mem_cgroup_wb_domain(struct bdi_writeback *wb)
3753 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3755 if (!memcg->css.parent)
3758 return &memcg->cgwb_domain;
3762 * mem_cgroup_wb_stats - retrieve writeback related stats from its memcg
3763 * @wb: bdi_writeback in question
3764 * @pfilepages: out parameter for number of file pages
3765 * @pheadroom: out parameter for number of allocatable pages according to memcg
3766 * @pdirty: out parameter for number of dirty pages
3767 * @pwriteback: out parameter for number of pages under writeback
3769 * Determine the numbers of file, headroom, dirty, and writeback pages in
3770 * @wb's memcg. File, dirty and writeback are self-explanatory. Headroom
3771 * is a bit more involved.
3773 * A memcg's headroom is "min(max, high) - used". In the hierarchy, the
3774 * headroom is calculated as the lowest headroom of itself and the
3775 * ancestors. Note that this doesn't consider the actual amount of
3776 * available memory in the system. The caller should further cap
3777 * *@pheadroom accordingly.
3779 void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages,
3780 unsigned long *pheadroom, unsigned long *pdirty,
3781 unsigned long *pwriteback)
3783 struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
3784 struct mem_cgroup *parent;
3786 *pdirty = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_DIRTY);
3788 /* this should eventually include NR_UNSTABLE_NFS */
3789 *pwriteback = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
3790 *pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
3791 (1 << LRU_ACTIVE_FILE));
3792 *pheadroom = PAGE_COUNTER_MAX;
3794 while ((parent = parent_mem_cgroup(memcg))) {
3795 unsigned long ceiling = min(memcg->memory.limit, memcg->high);
3796 unsigned long used = page_counter_read(&memcg->memory);
3798 *pheadroom = min(*pheadroom, ceiling - min(ceiling, used));
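/*
 * Illustrative sketch, not part of the kernel build: the hierarchical
 * headroom calculation of mem_cgroup_wb_stats() above.  struct
 * model_memcg is hypothetical; the loop mirrors taking, at every
 * non-root level, min(limit, high) - used and keeping the smallest
 * value found on the way up.
 */
#define MODEL_COUNTER_MAX	(~0UL)

struct model_memcg {
	struct model_memcg *parent;
	unsigned long limit;	/* models memcg->memory.limit */
	unsigned long high;	/* models memcg->high */
	unsigned long used;	/* models page_counter_read(&memcg->memory) */
};

static unsigned long model_wb_headroom(const struct model_memcg *memcg)
{
	unsigned long headroom = MODEL_COUNTER_MAX;

	for (; memcg->parent; memcg = memcg->parent) {
		unsigned long ceiling = memcg->limit < memcg->high ?
					memcg->limit : memcg->high;
		unsigned long used = memcg->used < ceiling ?
					memcg->used : ceiling;

		if (ceiling - used < headroom)
			headroom = ceiling - used;
	}
	return headroom;
}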
3803 #else /* CONFIG_CGROUP_WRITEBACK */
3805 static int memcg_wb_domain_init(struct mem_cgroup *memcg, gfp_t gfp)
3810 static void memcg_wb_domain_exit(struct mem_cgroup *memcg)
3814 static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
3818 #endif /* CONFIG_CGROUP_WRITEBACK */
3821 * DO NOT USE IN NEW FILES.
3823 * "cgroup.event_control" implementation.
3825 * This is way over-engineered. It tries to support fully configurable
3826 * events for each user. Such a level of flexibility is completely
3827 * unnecessary, especially in light of the planned unified hierarchy.
3829 * Please deprecate this and replace with something simpler if at all
3834 * Unregister event and free resources.
3836 * Gets called from workqueue.
3838 static void memcg_event_remove(struct work_struct *work)
3840 struct mem_cgroup_event *event =
3841 container_of(work, struct mem_cgroup_event, remove);
3842 struct mem_cgroup *memcg = event->memcg;
3844 remove_wait_queue(event->wqh, &event->wait);
3846 event->unregister_event(memcg, event->eventfd);
3848 /* Notify userspace the event is going away. */
3849 eventfd_signal(event->eventfd, 1);
3851 eventfd_ctx_put(event->eventfd);
3853 css_put(&memcg->css);
3857 * Gets called on POLLHUP on eventfd when user closes it.
3859 * Called with wqh->lock held and interrupts disabled.
3861 static int memcg_event_wake(wait_queue_t *wait, unsigned mode,
3862 int sync, void *key)
3864 struct mem_cgroup_event *event =
3865 container_of(wait, struct mem_cgroup_event, wait);
3866 struct mem_cgroup *memcg = event->memcg;
3867 unsigned long flags = (unsigned long)key;
3869 if (flags & POLLHUP) {
3871 * If the event has been detached at cgroup removal, we
3872 * can simply return knowing the other side will cleanup
3875 * We can't race against event freeing since the other
3876 * side will require wqh->lock via remove_wait_queue(),
3879 spin_lock(&memcg->event_list_lock);
3880 if (!list_empty(&event->list)) {
3881 list_del_init(&event->list);
3883 * We are in atomic context, but memcg_event_remove()
3884 * may sleep, so we have to call it from a workqueue.
3886 schedule_work(&event->remove);
3888 spin_unlock(&memcg->event_list_lock);
3894 static void memcg_event_ptable_queue_proc(struct file *file,
3895 wait_queue_head_t *wqh, poll_table *pt)
3897 struct mem_cgroup_event *event =
3898 container_of(pt, struct mem_cgroup_event, pt);
3901 add_wait_queue(wqh, &event->wait);
3905 * DO NOT USE IN NEW FILES.
3907 * Parse input and register new cgroup event handler.
3909 * Input must be in format '<event_fd> <control_fd> <args>'.
3910 * Interpretation of args is defined by control file implementation.
3912 static ssize_t memcg_write_event_control(struct kernfs_open_file *of,
3913 char *buf, size_t nbytes, loff_t off)
3915 struct cgroup_subsys_state *css = of_css(of);
3916 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
3917 struct mem_cgroup_event *event;
3918 struct cgroup_subsys_state *cfile_css;
3919 unsigned int efd, cfd;
3926 buf = strstrip(buf);
3928 efd = simple_strtoul(buf, &endp, 10);
3933 cfd = simple_strtoul(buf, &endp, 10);
3934 if ((*endp != ' ') && (*endp != '\0'))
3938 event = kzalloc(sizeof(*event), GFP_KERNEL);
3942 event->memcg = memcg;
3943 INIT_LIST_HEAD(&event->list);
3944 init_poll_funcptr(&event->pt, memcg_event_ptable_queue_proc);
3945 init_waitqueue_func_entry(&event->wait, memcg_event_wake);
3946 INIT_WORK(&event->remove, memcg_event_remove);
3954 event->eventfd = eventfd_ctx_fileget(efile.file);
3955 if (IS_ERR(event->eventfd)) {
3956 ret = PTR_ERR(event->eventfd);
3963 goto out_put_eventfd;
3966 /* the process needs read permission on the control file */
3967 /* AV: shouldn't we check that it's been opened for read instead? */
3968 ret = inode_permission(file_inode(cfile.file), MAY_READ);
3973 * Determine the event callbacks and set them in @event. This used
3974 * to be done via struct cftype but cgroup core no longer knows
3975 * about these events. The following is crude but the whole thing
3976 * is for compatibility anyway.
3978 * DO NOT ADD NEW FILES.
3980 name = cfile.file->f_path.dentry->d_name.name;
3982 if (!strcmp(name, "memory.usage_in_bytes")) {
3983 event->register_event = mem_cgroup_usage_register_event;
3984 event->unregister_event = mem_cgroup_usage_unregister_event;
3985 } else if (!strcmp(name, "memory.oom_control")) {
3986 event->register_event = mem_cgroup_oom_register_event;
3987 event->unregister_event = mem_cgroup_oom_unregister_event;
3988 } else if (!strcmp(name, "memory.pressure_level")) {
3989 event->register_event = vmpressure_register_event;
3990 event->unregister_event = vmpressure_unregister_event;
3991 } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) {
3992 event->register_event = memsw_cgroup_usage_register_event;
3993 event->unregister_event = memsw_cgroup_usage_unregister_event;
4000 * Verify that @cfile belongs to @css. Also, remaining events are
4001 * automatically removed on cgroup destruction but the removal is
4002 * asynchronous, so take an extra ref on @css.
4004 cfile_css = css_tryget_online_from_dir(cfile.file->f_path.dentry->d_parent,
4005 &memory_cgrp_subsys);
4007 if (IS_ERR(cfile_css))
4009 if (cfile_css != css) {
4014 ret = event->register_event(memcg, event->eventfd, buf);
4018 efile.file->f_op->poll(efile.file, &event->pt);
4020 spin_lock(&memcg->event_list_lock);
4021 list_add(&event->list, &memcg->event_list);
4022 spin_unlock(&memcg->event_list_lock);
4034 eventfd_ctx_put(event->eventfd);
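/*
 * Usage sketch, not part of the kernel build: registering a usage
 * threshold through the legacy cgroup.event_control interface parsed
 * above.  The cgroup v1 mount point is an assumption; the
 * "<event_fd> <control_fd> <args>" line format is the one described in
 * the comment above, with the threshold (here "32M") as the argument
 * for memory.usage_in_bytes events.  On success the returned eventfd
 * can be read (a u64 counter) or polled to wait for the threshold.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>

static int model_register_usage_event(const char *cgdir)
{
	char path[256], line[64];
	int efd, cfd, ctlfd, failed;

	efd = eventfd(0, 0);
	if (efd < 0)
		return -1;

	snprintf(path, sizeof(path), "%s/memory.usage_in_bytes", cgdir);
	cfd = open(path, O_RDONLY);
	snprintf(path, sizeof(path), "%s/cgroup.event_control", cgdir);
	ctlfd = open(path, O_WRONLY);
	if (cfd < 0 || ctlfd < 0) {
		if (cfd >= 0)
			close(cfd);
		if (ctlfd >= 0)
			close(ctlfd);
		close(efd);
		return -1;
	}

	snprintf(line, sizeof(line), "%d %d 32M", efd, cfd);
	failed = write(ctlfd, line, strlen(line)) < 0;

	close(ctlfd);
	close(cfd);	/* the registered event holds its own references */
	if (failed) {
		close(efd);
		return -1;
	}
	return efd;
}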
4043 static struct cftype mem_cgroup_legacy_files[] = {
4045 .name = "usage_in_bytes",
4046 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
4047 .read_u64 = mem_cgroup_read_u64,
4050 .name = "max_usage_in_bytes",
4051 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
4052 .write = mem_cgroup_reset,
4053 .read_u64 = mem_cgroup_read_u64,
4056 .name = "limit_in_bytes",
4057 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
4058 .write = mem_cgroup_write,
4059 .read_u64 = mem_cgroup_read_u64,
4062 .name = "soft_limit_in_bytes",
4063 .private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
4064 .write = mem_cgroup_write,
4065 .read_u64 = mem_cgroup_read_u64,
4069 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
4070 .write = mem_cgroup_reset,
4071 .read_u64 = mem_cgroup_read_u64,
4075 .seq_show = memcg_stat_show,
4078 .name = "force_empty",
4079 .write = mem_cgroup_force_empty_write,
4082 .name = "use_hierarchy",
4083 .write_u64 = mem_cgroup_hierarchy_write,
4084 .read_u64 = mem_cgroup_hierarchy_read,
4087 .name = "cgroup.event_control", /* XXX: for compat */
4088 .write = memcg_write_event_control,
4089 .flags = CFTYPE_NO_PREFIX,
4093 .name = "swappiness",
4094 .read_u64 = mem_cgroup_swappiness_read,
4095 .write_u64 = mem_cgroup_swappiness_write,
4098 .name = "move_charge_at_immigrate",
4099 .read_u64 = mem_cgroup_move_charge_read,
4100 .write_u64 = mem_cgroup_move_charge_write,
4103 .name = "oom_control",
4104 .seq_show = mem_cgroup_oom_control_read,
4105 .write_u64 = mem_cgroup_oom_control_write,
4106 .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
4109 .name = "pressure_level",
4113 .name = "numa_stat",
4114 .seq_show = memcg_numa_stat_show,
4117 #ifdef CONFIG_MEMCG_KMEM
4119 .name = "kmem.limit_in_bytes",
4120 .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT),
4121 .write = mem_cgroup_write,
4122 .read_u64 = mem_cgroup_read_u64,
4125 .name = "kmem.usage_in_bytes",
4126 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
4127 .read_u64 = mem_cgroup_read_u64,
4130 .name = "kmem.failcnt",
4131 .private = MEMFILE_PRIVATE(_KMEM, RES_FAILCNT),
4132 .write = mem_cgroup_reset,
4133 .read_u64 = mem_cgroup_read_u64,
4136 .name = "kmem.max_usage_in_bytes",
4137 .private = MEMFILE_PRIVATE(_KMEM, RES_MAX_USAGE),
4138 .write = mem_cgroup_reset,
4139 .read_u64 = mem_cgroup_read_u64,
4141 #ifdef CONFIG_SLABINFO
4143 .name = "kmem.slabinfo",
4144 .seq_start = slab_start,
4145 .seq_next = slab_next,
4146 .seq_stop = slab_stop,
4147 .seq_show = memcg_slab_show,
4151 { }, /* terminate */
4154 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4156 struct mem_cgroup_per_node *pn;
4157 struct mem_cgroup_per_zone *mz;
4158 int zone, tmp = node;
4160 * This routine is called against possible nodes.
4161 * But it's a BUG to call kmalloc() against an offline node.
4163 * TODO: this routine can waste a lot of memory for nodes which will
4164 * never be onlined. It's better to use a memory hotplug callback
4167 if (!node_state(node, N_NORMAL_MEMORY))
4169 pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
4173 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4174 mz = &pn->zoneinfo[zone];
4175 lruvec_init(&mz->lruvec);
4176 mz->usage_in_excess = 0;
4177 mz->on_tree = false;
4180 memcg->nodeinfo[node] = pn;
4184 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4186 kfree(memcg->nodeinfo[node]);
4189 static struct mem_cgroup *mem_cgroup_alloc(void)
4191 struct mem_cgroup *memcg;
4194 size = sizeof(struct mem_cgroup);
4195 size += nr_node_ids * sizeof(struct mem_cgroup_per_node *);
4197 memcg = kzalloc(size, GFP_KERNEL);
4201 memcg->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
4205 if (memcg_wb_domain_init(memcg, GFP_KERNEL))
4211 free_percpu(memcg->stat);
4218 * When destroying a mem_cgroup, references from swap_cgroup can remain.
4219 * (Scanning all of them at force_empty is too costly...)
4221 * Instead of clearing all references at force_empty, we remember
4222 * the number of references from swap_cgroup and free the mem_cgroup when
4223 * it goes down to 0.
4225 * Removal of the cgroup itself succeeds regardless of refs from swap.
4228 static void __mem_cgroup_free(struct mem_cgroup *memcg)
4232 mem_cgroup_remove_from_trees(memcg);
4235 free_mem_cgroup_per_zone_info(memcg, node);
4237 free_percpu(memcg->stat);
4238 memcg_wb_domain_exit(memcg);
4243 * Returns the parent mem_cgroup in the memcg hierarchy with hierarchy enabled.
4245 struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
4247 if (!memcg->memory.parent)
4249 return mem_cgroup_from_counter(memcg->memory.parent, memory);
4251 EXPORT_SYMBOL(parent_mem_cgroup);
4253 static struct cgroup_subsys_state * __ref
4254 mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
4256 struct mem_cgroup *memcg;
4257 long error = -ENOMEM;
4260 memcg = mem_cgroup_alloc();
4262 return ERR_PTR(error);
4265 if (alloc_mem_cgroup_per_zone_info(memcg, node))
4269 if (parent_css == NULL) {
4270 root_mem_cgroup = memcg;
4271 mem_cgroup_root_css = &memcg->css;
4272 page_counter_init(&memcg->memory, NULL);
4273 memcg->high = PAGE_COUNTER_MAX;
4274 memcg->soft_limit = PAGE_COUNTER_MAX;
4275 page_counter_init(&memcg->memsw, NULL);
4276 page_counter_init(&memcg->kmem, NULL);
4279 memcg->last_scanned_node = MAX_NUMNODES;
4280 INIT_LIST_HEAD(&memcg->oom_notify);
4281 memcg->move_charge_at_immigrate = 0;
4282 mutex_init(&memcg->thresholds_lock);
4283 spin_lock_init(&memcg->move_lock);
4284 vmpressure_init(&memcg->vmpressure);
4285 INIT_LIST_HEAD(&memcg->event_list);
4286 spin_lock_init(&memcg->event_list_lock);
4287 #ifdef CONFIG_MEMCG_KMEM
4288 memcg->kmemcg_id = -1;
4290 #ifdef CONFIG_CGROUP_WRITEBACK
4291 INIT_LIST_HEAD(&memcg->cgwb_list);
4296 __mem_cgroup_free(memcg);
4297 return ERR_PTR(error);
4301 mem_cgroup_css_online(struct cgroup_subsys_state *css)
4303 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4304 struct mem_cgroup *parent = mem_cgroup_from_css(css->parent);
4307 if (css->id > MEM_CGROUP_ID_MAX)
4313 mutex_lock(&memcg_create_mutex);
4315 memcg->use_hierarchy = parent->use_hierarchy;
4316 memcg->oom_kill_disable = parent->oom_kill_disable;
4317 memcg->swappiness = mem_cgroup_swappiness(parent);
4319 if (parent->use_hierarchy) {
4320 page_counter_init(&memcg->memory, &parent->memory);
4321 memcg->high = PAGE_COUNTER_MAX;
4322 memcg->soft_limit = PAGE_COUNTER_MAX;
4323 page_counter_init(&memcg->memsw, &parent->memsw);
4324 page_counter_init(&memcg->kmem, &parent->kmem);
4327 * No need to take a reference to the parent because cgroup
4328 * core guarantees its existence.
4331 page_counter_init(&memcg->memory, NULL);
4332 memcg->high = PAGE_COUNTER_MAX;
4333 memcg->soft_limit = PAGE_COUNTER_MAX;
4334 page_counter_init(&memcg->memsw, NULL);
4335 page_counter_init(&memcg->kmem, NULL);
4337 * Deeper hierarchy with use_hierarchy == false doesn't make
4338 * much sense, so let the cgroup subsystem know about this
4339 * unfortunate state in our controller.
4341 if (parent != root_mem_cgroup)
4342 memory_cgrp_subsys.broken_hierarchy = true;
4344 mutex_unlock(&memcg_create_mutex);
4346 ret = memcg_init_kmem(memcg, &memory_cgrp_subsys);
4351 * Make sure the memcg is initialized: mem_cgroup_iter()
4352 * orders reading memcg->initialized against its callers
4353 * reading the memcg members.
4355 smp_store_release(&memcg->initialized, 1);
4360 static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
4362 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4363 struct mem_cgroup_event *event, *tmp;
4366 * Unregister events and notify userspace.
4367 * Notify userspace about cgroup removing only after rmdir of cgroup
4368 * directory to avoid race between userspace and kernelspace.
4370 spin_lock(&memcg->event_list_lock);
4371 list_for_each_entry_safe(event, tmp, &memcg->event_list, list) {
4372 list_del_init(&event->list);
4373 schedule_work(&event->remove);
4375 spin_unlock(&memcg->event_list_lock);
4377 vmpressure_cleanup(&memcg->vmpressure);
4379 memcg_deactivate_kmem(memcg);
4381 wb_memcg_offline(memcg);
4384 static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
4386 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4388 memcg_destroy_kmem(memcg);
4389 __mem_cgroup_free(memcg);
4393 * mem_cgroup_css_reset - reset the states of a mem_cgroup
4394 * @css: the target css
4396 * Reset the states of the mem_cgroup associated with @css. This is
4397 * invoked when the userland requests disabling on the default hierarchy
4398 * but the memcg is pinned through dependency. The memcg should stop
4399 * applying policies and should revert to the vanilla state as it may be
4400 * made visible again.
4402 * The current implementation only resets the essential configurations.
4403 * This needs to be expanded to cover all the visible parts.
4405 static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
4407 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4409 mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
4410 mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
4411 memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
4413 memcg->high = PAGE_COUNTER_MAX;
4414 memcg->soft_limit = PAGE_COUNTER_MAX;
4415 memcg_wb_domain_size_changed(memcg);
4419 /* Handlers for move charge at task migration. */
4420 static int mem_cgroup_do_precharge(unsigned long count)
4424 /* Try a single bulk charge without reclaim first */
4425 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_WAIT, count);
4427 mc.precharge += count;
4431 /* Try charges one by one with reclaim */
4433 ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
4443 * get_mctgt_type - get target type of moving charge
4444 * @vma: the vma the pte to be checked belongs to
4445 * @addr: the address corresponding to the pte to be checked
4446 * @ptent: the pte to be checked
4447 * @target: the pointer where the target page or swap entry will be stored (can be NULL)
4450 * 0(MC_TARGET_NONE): if the pte is not a target for move charge.
4451 * 1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
4452 * move charge. If @target is not NULL, the page is stored in target->page
4453 * with an extra refcount taken (callers should handle it).
4454 * 2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
4455 * target for charge migration. If @target is not NULL, the entry is stored
4458 * Called with pte lock held.
4465 enum mc_target_type {
4471 static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
4472 unsigned long addr, pte_t ptent)
4474 struct page *page = vm_normal_page(vma, addr, ptent);
4476 if (!page || !page_mapped(page))
4478 if (PageAnon(page)) {
4479 if (!(mc.flags & MOVE_ANON))
4482 if (!(mc.flags & MOVE_FILE))
4485 if (!get_page_unless_zero(page))
4492 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4493 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4495 struct page *page = NULL;
4496 swp_entry_t ent = pte_to_swp_entry(ptent);
4498 if (!(mc.flags & MOVE_ANON) || non_swap_entry(ent))
4501 * Because lookup_swap_cache() updates some statistics counters,
4502 * we call find_get_page() with swapper_space directly.
4504 page = find_get_page(swap_address_space(ent), ent.val);
4505 if (do_swap_account)
4506 entry->val = ent.val;
4511 static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
4512 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4518 static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
4519 unsigned long addr, pte_t ptent, swp_entry_t *entry)
4521 struct page *page = NULL;
4522 struct address_space *mapping;
4525 if (!vma->vm_file) /* anonymous vma */
4527 if (!(mc.flags & MOVE_FILE))
4530 mapping = vma->vm_file->f_mapping;
4531 pgoff = linear_page_index(vma, addr);
4533 /* page is moved even if it's not RSS of this task (page-faulted). */
4535 /* shmem/tmpfs may report page out on swap: account for that too. */
4536 if (shmem_mapping(mapping)) {
4537 page = find_get_entry(mapping, pgoff);
4538 if (radix_tree_exceptional_entry(page)) {
4539 swp_entry_t swp = radix_to_swp_entry(page);
4540 if (do_swap_account)
4542 page = find_get_page(swap_address_space(swp), swp.val);
4545 page = find_get_page(mapping, pgoff);
4547 page = find_get_page(mapping, pgoff);
4553 * mem_cgroup_move_account - move account of the page
4555 * @nr_pages: number of regular pages (>1 for huge pages)
4556 * @from: mem_cgroup which the page is moved from.
4557 * @to: mem_cgroup which the page is moved to. @from != @to.
4559 * The caller must confirm the following.
4560 * - page is not on LRU (isolate_page() is useful.)
4561 * - compound_lock is held when nr_pages > 1
4563 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
4566 static int mem_cgroup_move_account(struct page *page,
4567 unsigned int nr_pages,
4568 struct mem_cgroup *from,
4569 struct mem_cgroup *to)
4571 unsigned long flags;
4575 VM_BUG_ON(from == to);
4576 VM_BUG_ON_PAGE(PageLRU(page), page);
4578 * The page is isolated from the LRU, so the collapse function
4579 * will not handle this page. But page splitting can happen.
4580 * Do this check under compound_page_lock(). The caller should
4584 if (nr_pages > 1 && !PageTransHuge(page))
4588 * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup
4589 * of its source page while we change it: page migration takes
4590 * both pages off the LRU, but page cache replacement doesn't.
4592 if (!trylock_page(page))
4596 if (page->mem_cgroup != from)
4599 anon = PageAnon(page);
4601 spin_lock_irqsave(&from->move_lock, flags);
4603 if (!anon && page_mapped(page)) {
4604 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4606 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED],
4611 * move_lock is grabbed above and the caller has set from->moving_account, so
4612 * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
4613 * So mapping should be stable for dirty pages.
4615 if (!anon && PageDirty(page)) {
4616 struct address_space *mapping = page_mapping(page);
4618 if (mapping_cap_account_dirty(mapping)) {
4619 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_DIRTY],
4621 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_DIRTY],
4626 if (PageWriteback(page)) {
4627 __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4629 __this_cpu_add(to->stat->count[MEM_CGROUP_STAT_WRITEBACK],
4634 * It is safe to change page->mem_cgroup here because the page
4635 * is referenced, charged, and isolated - we can't race with
4636 * uncharging, charging, migration, or LRU putback.
4639 /* caller should have done css_get */
4640 page->mem_cgroup = to;
4641 spin_unlock_irqrestore(&from->move_lock, flags);
4645 local_irq_disable();
4646 mem_cgroup_charge_statistics(to, page, nr_pages);
4647 memcg_check_events(to, page);
4648 mem_cgroup_charge_statistics(from, page, -nr_pages);
4649 memcg_check_events(from, page);
4657 static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma,
4658 unsigned long addr, pte_t ptent, union mc_target *target)
4660 struct page *page = NULL;
4661 enum mc_target_type ret = MC_TARGET_NONE;
4662 swp_entry_t ent = { .val = 0 };
4664 if (pte_present(ptent))
4665 page = mc_handle_present_pte(vma, addr, ptent);
4666 else if (is_swap_pte(ptent))
4667 page = mc_handle_swap_pte(vma, addr, ptent, &ent);
4668 else if (pte_none(ptent))
4669 page = mc_handle_file_pte(vma, addr, ptent, &ent);
4671 if (!page && !ent.val)
4675 * Do only loose check w/o serialization.
4676 * mem_cgroup_move_account() checks the page is valid or
4677 * not under LRU exclusion.
4679 if (page->mem_cgroup == mc.from) {
4680 ret = MC_TARGET_PAGE;
4682 target->page = page;
4684 if (!ret || !target)
4687 /* There is a swap entry and the page doesn't exist or isn't charged */
4688 if (ent.val && !ret &&
4689 mem_cgroup_id(mc.from) == lookup_swap_cgroup_id(ent)) {
4690 ret = MC_TARGET_SWAP;
4697 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
4699 * We don't consider swapping or file mapped pages because THP does not
4700 * support them for now.
4701 * Caller should make sure that pmd_trans_huge(pmd) is true.
4703 static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4704 unsigned long addr, pmd_t pmd, union mc_target *target)
4706 struct page *page = NULL;
4707 enum mc_target_type ret = MC_TARGET_NONE;
4709 page = pmd_page(pmd);
4710 VM_BUG_ON_PAGE(!page || !PageHead(page), page);
4711 if (!(mc.flags & MOVE_ANON))
4713 if (page->mem_cgroup == mc.from) {
4714 ret = MC_TARGET_PAGE;
4717 target->page = page;
4723 static inline enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
4724 unsigned long addr, pmd_t pmd, union mc_target *target)
4726 return MC_TARGET_NONE;
4730 static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
4731 unsigned long addr, unsigned long end,
4732 struct mm_walk *walk)
4734 struct vm_area_struct *vma = walk->vma;
4738 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
4739 if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
4740 mc.precharge += HPAGE_PMD_NR;
4745 if (pmd_trans_unstable(pmd))
4747 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4748 for (; addr != end; pte++, addr += PAGE_SIZE)
4749 if (get_mctgt_type(vma, addr, *pte, NULL))
4750 mc.precharge++; /* increment precharge temporarily */
4751 pte_unmap_unlock(pte - 1, ptl);
4757 static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4759 unsigned long precharge;
4761 struct mm_walk mem_cgroup_count_precharge_walk = {
4762 .pmd_entry = mem_cgroup_count_precharge_pte_range,
4765 down_read(&mm->mmap_sem);
4766 walk_page_range(0, ~0UL, &mem_cgroup_count_precharge_walk);
4767 up_read(&mm->mmap_sem);
4769 precharge = mc.precharge;
4775 static int mem_cgroup_precharge_mc(struct mm_struct *mm)
4777 unsigned long precharge = mem_cgroup_count_precharge(mm);
4779 VM_BUG_ON(mc.moving_task);
4780 mc.moving_task = current;
4781 return mem_cgroup_do_precharge(precharge);
4784 /* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
4785 static void __mem_cgroup_clear_mc(void)
4787 struct mem_cgroup *from = mc.from;
4788 struct mem_cgroup *to = mc.to;
4790 /* we must uncharge all the leftover precharges from mc.to */
4792 cancel_charge(mc.to, mc.precharge);
4796 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
4797 * we must uncharge here.
4799 if (mc.moved_charge) {
4800 cancel_charge(mc.from, mc.moved_charge);
4801 mc.moved_charge = 0;
4803 /* we must fixup refcnts and charges */
4804 if (mc.moved_swap) {
4805 /* uncharge swap account from the old cgroup */
4806 if (!mem_cgroup_is_root(mc.from))
4807 page_counter_uncharge(&mc.from->memsw, mc.moved_swap);
4810 * we charged both to->memory and to->memsw, so we
4811 * should uncharge to->memory.
4813 if (!mem_cgroup_is_root(mc.to))
4814 page_counter_uncharge(&mc.to->memory, mc.moved_swap);
4816 css_put_many(&mc.from->css, mc.moved_swap);
4818 /* we've already done css_get(mc.to) */
4821 memcg_oom_recover(from);
4822 memcg_oom_recover(to);
4823 wake_up_all(&mc.waitq);
4826 static void mem_cgroup_clear_mc(void)
4829 * we must clear moving_task before waking up waiters at the end of task migration.
4832 mc.moving_task = NULL;
4833 __mem_cgroup_clear_mc();
4834 spin_lock(&mc.lock);
4837 spin_unlock(&mc.lock);
4840 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
4841 struct cgroup_taskset *tset)
4843 struct mem_cgroup *memcg = mem_cgroup_from_css(css);
4844 struct mem_cgroup *from;
4845 struct task_struct *p;
4846 struct mm_struct *mm;
4847 unsigned long move_flags;
4851 * We are now committed to this value whatever it is. Changes in this
4852 * tunable will only affect upcoming migrations, not the current one.
4853 * So we need to save it, and keep it going.
4855 move_flags = READ_ONCE(memcg->move_charge_at_immigrate);
4859 p = cgroup_taskset_first(tset);
4860 from = mem_cgroup_from_task(p);
4862 VM_BUG_ON(from == memcg);
4864 mm = get_task_mm(p);
4867 /* We move charges only when we move an owner of the mm */
4868 if (mm->owner == p) {
4871 VM_BUG_ON(mc.precharge);
4872 VM_BUG_ON(mc.moved_charge);
4873 VM_BUG_ON(mc.moved_swap);
4875 spin_lock(&mc.lock);
4878 mc.flags = move_flags;
4879 spin_unlock(&mc.lock);
4880 /* We set mc.moving_task later */
4882 ret = mem_cgroup_precharge_mc(mm);
4884 mem_cgroup_clear_mc();
4890 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
4891 struct cgroup_taskset *tset)
4894 mem_cgroup_clear_mc();
4897 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
4898 unsigned long addr, unsigned long end,
4899 struct mm_walk *walk)
4902 struct vm_area_struct *vma = walk->vma;
4905 enum mc_target_type target_type;
4906 union mc_target target;
4910 * We don't take compound_lock() here, but no race with splitting thp happens because:
4912 * - if pmd_trans_huge_lock() returns 1, the relevant thp is not
4913 * under splitting, which means there's no concurrent thp split,
4914 * - if another thread runs into split_huge_page() just after we
4915 * entered this if-block, the thread must wait for page table lock
4916 * to be unlocked in __split_huge_page_splitting(), where the main
4917 * part of thp split is not executed yet.
4919 if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
4920 if (mc.precharge < HPAGE_PMD_NR) {
4924 target_type = get_mctgt_type_thp(vma, addr, *pmd, &target);
4925 if (target_type == MC_TARGET_PAGE) {
4927 if (!isolate_lru_page(page)) {
4928 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
4930 mc.precharge -= HPAGE_PMD_NR;
4931 mc.moved_charge += HPAGE_PMD_NR;
4933 putback_lru_page(page);
4941 if (pmd_trans_unstable(pmd))
4944 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
4945 for (; addr != end; addr += PAGE_SIZE) {
4946 pte_t ptent = *(pte++);
4952 switch (get_mctgt_type(vma, addr, ptent, &target)) {
4953 case MC_TARGET_PAGE:
4955 if (isolate_lru_page(page))
4957 if (!mem_cgroup_move_account(page, 1, mc.from, mc.to)) {
4959 /* we uncharge from mc.from later. */
4962 putback_lru_page(page);
4963 put: /* get_mctgt_type() gets the page */
4966 case MC_TARGET_SWAP:
4968 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
4970 /* we fixup refcnts and charges later. */
4978 pte_unmap_unlock(pte - 1, ptl);
4983 * We have consumed all precharges we got in can_attach().
4984 * We try to charge one by one, but don't do any additional
4985 * charges to mc.to if we have already failed a charge once in the attach() phase.
4988 ret = mem_cgroup_do_precharge(1);
4996 static void mem_cgroup_move_charge(struct mm_struct *mm)
4998 struct mm_walk mem_cgroup_move_charge_walk = {
4999 .pmd_entry = mem_cgroup_move_charge_pte_range,
5003 lru_add_drain_all();
5005 * Signal mem_cgroup_begin_page_stat() to take the memcg's
5006 * move_lock while we're moving its pages to another memcg.
5007 * Then wait for already started RCU-only updates to finish.
5009 atomic_inc(&mc.from->moving_account);
5012 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5014 * Someone who is holding the mmap_sem might be waiting in
5015 * waitq. So we cancel all extra charges, wake up all waiters,
5016 * and retry. Because we cancel precharges, we might not be able
5017 * to move enough charges, but moving charge is a best-effort
5018 * feature anyway, so it wouldn't be a big problem.
5020 __mem_cgroup_clear_mc();
5025 * When we have consumed all precharges and fail to do an
5026 * additional charge, the page walk just aborts.
5028 walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
5029 up_read(&mm->mmap_sem);
5030 atomic_dec(&mc.from->moving_account);
5033 static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
5034 struct cgroup_taskset *tset)
5036 struct task_struct *p = cgroup_taskset_first(tset);
5037 struct mm_struct *mm = get_task_mm(p);
5041 mem_cgroup_move_charge(mm);
5045 mem_cgroup_clear_mc();
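/*
 * Summary of the charge-moving sequence wired up in memory_cgrp_subsys
 * below (CONFIG_MMU case): .can_attach precharges via
 * mem_cgroup_precharge_mc(), .attach walks the target mm and moves
 * charges via mem_cgroup_move_charge(), and .cancel_attach (or the tail
 * of .attach) releases any leftover precharge via mem_cgroup_clear_mc().
 */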
5047 #else /* !CONFIG_MMU */
5048 static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
5049 struct cgroup_taskset *tset)
5053 static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
5054 struct cgroup_taskset *tset)
5057 static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
5058 struct cgroup_taskset *tset)
5064 * Cgroup retains root cgroups across [un]mount cycles making it necessary
5065 * to verify whether we're attached to the default hierarchy on each mount attempt.
5068 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
5071 * use_hierarchy is forced on the default hierarchy. cgroup core
5072 * guarantees that @root doesn't have any children, so turning it
5073 * on for the root memcg is enough.
5075 if (cgroup_on_dfl(root_css->cgroup))
5076 root_mem_cgroup->use_hierarchy = true;
5078 root_mem_cgroup->use_hierarchy = false;
5081 static u64 memory_current_read(struct cgroup_subsys_state *css,
5084 return mem_cgroup_usage(mem_cgroup_from_css(css), false);
5087 static int memory_low_show(struct seq_file *m, void *v)
5089 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5090 unsigned long low = READ_ONCE(memcg->low);
5092 if (low == PAGE_COUNTER_MAX)
5093 seq_puts(m, "max\n");
5095 seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE);
5100 static ssize_t memory_low_write(struct kernfs_open_file *of,
5101 char *buf, size_t nbytes, loff_t off)
5103 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5107 buf = strstrip(buf);
5108 err = page_counter_memparse(buf, "max", &low);
5117 static int memory_high_show(struct seq_file *m, void *v)
5119 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5120 unsigned long high = READ_ONCE(memcg->high);
5122 if (high == PAGE_COUNTER_MAX)
5123 seq_puts(m, "max\n");
5125 seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE);
5130 static ssize_t memory_high_write(struct kernfs_open_file *of,
5131 char *buf, size_t nbytes, loff_t off)
5133 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5137 buf = strstrip(buf);
5138 err = page_counter_memparse(buf, "max", &high);
5144 memcg_wb_domain_size_changed(memcg);
5148 static int memory_max_show(struct seq_file *m, void *v)
5150 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5151 unsigned long max = READ_ONCE(memcg->memory.limit);
5153 if (max == PAGE_COUNTER_MAX)
5154 seq_puts(m, "max\n");
5156 seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE);
5161 static ssize_t memory_max_write(struct kernfs_open_file *of,
5162 char *buf, size_t nbytes, loff_t off)
5164 struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
5168 buf = strstrip(buf);
5169 err = page_counter_memparse(buf, "max", &max);
5173 err = mem_cgroup_resize_limit(memcg, max);
5177 memcg_wb_domain_size_changed(memcg);
5181 static int memory_events_show(struct seq_file *m, void *v)
5183 struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
5185 seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
5186 seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
5187 seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
5188 seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
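/*
 * Reading memory.events therefore produces one "<event> <count>" pair per
 * line; the counts below are illustrative only:
 *
 *	low 0
 *	high 12
 *	max 3
 *	oom 0
 */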
5193 static struct cftype memory_files[] = {
5196 .read_u64 = memory_current_read,
5200 .flags = CFTYPE_NOT_ON_ROOT,
5201 .seq_show = memory_low_show,
5202 .write = memory_low_write,
5206 .flags = CFTYPE_NOT_ON_ROOT,
5207 .seq_show = memory_high_show,
5208 .write = memory_high_write,
5212 .flags = CFTYPE_NOT_ON_ROOT,
5213 .seq_show = memory_max_show,
5214 .write = memory_max_write,
5218 .flags = CFTYPE_NOT_ON_ROOT,
5219 .seq_show = memory_events_show,
5224 struct cgroup_subsys memory_cgrp_subsys = {
5225 .css_alloc = mem_cgroup_css_alloc,
5226 .css_online = mem_cgroup_css_online,
5227 .css_offline = mem_cgroup_css_offline,
5228 .css_free = mem_cgroup_css_free,
5229 .css_reset = mem_cgroup_css_reset,
5230 .can_attach = mem_cgroup_can_attach,
5231 .cancel_attach = mem_cgroup_cancel_attach,
5232 .attach = mem_cgroup_move_task,
5233 .bind = mem_cgroup_bind,
5234 .dfl_cftypes = memory_files,
5235 .legacy_cftypes = mem_cgroup_legacy_files,
5240 * mem_cgroup_low - check if memory consumption is below the normal range
5241 * @root: the highest ancestor to consider
5242 * @memcg: the memory cgroup to check
5244 * Returns %true if memory consumption of @memcg, and that of all
5245 * configurable ancestors up to @root, is below the normal range.
5247 bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg)
5249 if (mem_cgroup_disabled())
5253 * The toplevel group doesn't have a configurable range, so
5254 * it's never low when looked at directly, and it is not
5255 * considered an ancestor when assessing the hierarchy.
5258 if (memcg == root_mem_cgroup)
5261 if (page_counter_read(&memcg->memory) >= memcg->low)
5264 while (memcg != root) {
5265 memcg = parent_mem_cgroup(memcg);
5267 if (memcg == root_mem_cgroup)
5270 if (page_counter_read(&memcg->memory) >= memcg->low)
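/*
 * Illustrative sketch, not taken from this file: reclaim is expected to
 * consult mem_cgroup_low() and skip groups inside their low boundary
 * unless it would otherwise make no progress, roughly:
 *
 *	if (mem_cgroup_low(root, memcg)) {
 *		if (!must_reclaim_anyway)
 *			continue;
 *		mem_cgroup_events(memcg, MEMCG_LOW, 1);
 *	}
 *
 * (must_reclaim_anyway stands in for whatever last-resort condition the
 * caller uses; MEMCG_LOW is the event reported back via memory.events.
 * This is a sketch of the intended use, not a copy of the reclaim code.)
 */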
5277 * mem_cgroup_try_charge - try charging a page
5278 * @page: page to charge
5279 * @mm: mm context of the victim
5280 * @gfp_mask: reclaim mode
5281 * @memcgp: charged memcg return
5283 * Try to charge @page to the memcg that @mm belongs to, reclaiming
5284 * pages according to @gfp_mask if necessary.
5286 * Returns 0 on success, with *@memcgp pointing to the charged memcg.
5287 * Otherwise, an error code is returned.
5289 * After page->mapping has been set up, the caller must finalize the
5290 * charge with mem_cgroup_commit_charge(). Or abort the transaction
5291 * with mem_cgroup_cancel_charge() in case page instantiation fails.
5293 int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
5294 gfp_t gfp_mask, struct mem_cgroup **memcgp)
5296 struct mem_cgroup *memcg = NULL;
5297 unsigned int nr_pages = 1;
5300 if (mem_cgroup_disabled())
5303 if (PageSwapCache(page)) {
5305 * Every swap fault against a single page tries to charge the
5306 * page; bail out as early as possible. shmem_unuse() encounters
5307 * already charged pages, too. The USED bit is protected by
5308 * the page lock, which serializes swap cache removal, which
5309 * in turn serializes uncharging.
5311 VM_BUG_ON_PAGE(!PageLocked(page), page);
5312 if (page->mem_cgroup)
5315 if (do_swap_account) {
5316 swp_entry_t ent = { .val = page_private(page), };
5317 unsigned short id = lookup_swap_cgroup_id(ent);
5320 memcg = mem_cgroup_from_id(id);
5321 if (memcg && !css_tryget_online(&memcg->css))
5327 if (PageTransHuge(page)) {
5328 nr_pages <<= compound_order(page);
5329 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5333 memcg = get_mem_cgroup_from_mm(mm);
5335 ret = try_charge(memcg, gfp_mask, nr_pages);
5337 css_put(&memcg->css);
5344 * mem_cgroup_commit_charge - commit a page charge
5345 * @page: page to charge
5346 * @memcg: memcg to charge the page to
5347 * @lrucare: page might be on LRU already
5349 * Finalize a charge transaction started by mem_cgroup_try_charge(),
5350 * after page->mapping has been set up. This must happen atomically
5351 * as part of the page instantiation, i.e. under the page table lock
5352 * for anonymous pages, under the page lock for page and swap cache.
5354 * In addition, the page must not be on the LRU during the commit, to
5355 * prevent racing with task migration. If it might be, use @lrucare.
5357 * Use mem_cgroup_cancel_charge() to cancel the transaction instead.
5359 void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
5362 unsigned int nr_pages = 1;
5364 VM_BUG_ON_PAGE(!page->mapping, page);
5365 VM_BUG_ON_PAGE(PageLRU(page) && !lrucare, page);
5367 if (mem_cgroup_disabled())
5370 * Swap faults will attempt to charge the same page multiple
5371 * times. But reuse_swap_page() might have removed the page
5372 * from swapcache already, so we can't check PageSwapCache().
5377 commit_charge(page, memcg, lrucare);
5379 if (PageTransHuge(page)) {
5380 nr_pages <<= compound_order(page);
5381 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5384 local_irq_disable();
5385 mem_cgroup_charge_statistics(memcg, page, nr_pages);
5386 memcg_check_events(memcg, page);
5389 if (do_swap_account && PageSwapCache(page)) {
5390 swp_entry_t entry = { .val = page_private(page) };
5392 * The swap entry might not get freed for a long time,
5393 * let's not wait for it. The page already received a
5394 * memory+swap charge, drop the swap entry duplicate.
5396 mem_cgroup_uncharge_swap(entry);
5401 * mem_cgroup_cancel_charge - cancel a page charge
5402 * @page: page to charge
5403 * @memcg: memcg to charge the page to
5405 * Cancel a charge transaction started by mem_cgroup_try_charge().
5407 void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg)
5409 unsigned int nr_pages = 1;
5411 if (mem_cgroup_disabled())
5414 * Swap faults will attempt to charge the same page multiple
5415 * times. But reuse_swap_page() might have removed the page
5416 * from swapcache already, so we can't check PageSwapCache().
5421 if (PageTransHuge(page)) {
5422 nr_pages <<= compound_order(page);
5423 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5426 cancel_charge(memcg, nr_pages);
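/*
 * Putting the three calls above together, a charge transaction in a
 * simplified, illustrative fault path looks roughly like this (error
 * handling and locking of real callers are omitted; install_the_page()
 * is a hypothetical stand-in for page table or page cache insertion):
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
 *		return -ENOMEM;
 *	if (install_the_page(page)) {
 *		mem_cgroup_cancel_charge(page, memcg);
 *		return -EBUSY;
 *	}
 *	mem_cgroup_commit_charge(page, memcg, false);
 */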
5429 static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout,
5430 unsigned long nr_anon, unsigned long nr_file,
5431 unsigned long nr_huge, struct page *dummy_page)
5433 unsigned long nr_pages = nr_anon + nr_file;
5434 unsigned long flags;
5436 if (!mem_cgroup_is_root(memcg)) {
5437 page_counter_uncharge(&memcg->memory, nr_pages);
5438 if (do_swap_account)
5439 page_counter_uncharge(&memcg->memsw, nr_pages);
5440 memcg_oom_recover(memcg);
5443 local_irq_save(flags);
5444 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon);
5445 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file);
5446 __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge);
5447 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout);
5448 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
5449 memcg_check_events(memcg, dummy_page);
5450 local_irq_restore(flags);
5452 if (!mem_cgroup_is_root(memcg))
5453 css_put_many(&memcg->css, nr_pages);
5456 static void uncharge_list(struct list_head *page_list)
5458 struct mem_cgroup *memcg = NULL;
5459 unsigned long nr_anon = 0;
5460 unsigned long nr_file = 0;
5461 unsigned long nr_huge = 0;
5462 unsigned long pgpgout = 0;
5463 struct list_head *next;
5466 next = page_list->next;
5468 unsigned int nr_pages = 1;
5470 page = list_entry(next, struct page, lru);
5471 next = page->lru.next;
5473 VM_BUG_ON_PAGE(PageLRU(page), page);
5474 VM_BUG_ON_PAGE(page_count(page), page);
5476 if (!page->mem_cgroup)
5480 * Nobody should be changing or seriously looking at
5481 * page->mem_cgroup at this point, we have fully
5482 * exclusive access to the page.
5485 if (memcg != page->mem_cgroup) {
5487 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5489 pgpgout = nr_anon = nr_file = nr_huge = 0;
5491 memcg = page->mem_cgroup;
5494 if (PageTransHuge(page)) {
5495 nr_pages <<= compound_order(page);
5496 VM_BUG_ON_PAGE(!PageTransHuge(page), page);
5497 nr_huge += nr_pages;
5501 nr_anon += nr_pages;
5503 nr_file += nr_pages;
5505 page->mem_cgroup = NULL;
5508 } while (next != page_list);
5511 uncharge_batch(memcg, pgpgout, nr_anon, nr_file,
5516 * mem_cgroup_uncharge - uncharge a page
5517 * @page: page to uncharge
5519 * Uncharge a page previously charged with mem_cgroup_try_charge() and
5520 * mem_cgroup_commit_charge().
5522 void mem_cgroup_uncharge(struct page *page)
5524 if (mem_cgroup_disabled())
5527 /* Don't touch page->lru of any random page, pre-check: */
5528 if (!page->mem_cgroup)
5531 INIT_LIST_HEAD(&page->lru);
5532 uncharge_list(&page->lru);
5536 * mem_cgroup_uncharge_list - uncharge a list of pages
5537 * @page_list: list of pages to uncharge
5539 * Uncharge a list of pages previously charged with
5540 * mem_cgroup_try_charge() and mem_cgroup_commit_charge().
5542 void mem_cgroup_uncharge_list(struct list_head *page_list)
5544 if (mem_cgroup_disabled())
5547 if (!list_empty(page_list))
5548 uncharge_list(page_list);
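/*
 * Illustrative sketch: bulk release paths are expected to collect pages
 * on a list and uncharge them in one go rather than calling
 * mem_cgroup_uncharge() per page (the bulk free helper named here is an
 * assumption about the caller, not something this file requires):
 *
 *	LIST_HEAD(pages_to_free);
 *
 *	list_add(&page->lru, &pages_to_free);
 *	...
 *	mem_cgroup_uncharge_list(&pages_to_free);
 *	free_hot_cold_page_list(&pages_to_free, true);
 */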
5552 * mem_cgroup_migrate - migrate a charge to another page
5553 * @oldpage: currently charged page
5554 * @newpage: page to transfer the charge to
5555 * @lrucare: either or both pages might be on the LRU already
5557 * Migrate the charge from @oldpage to @newpage.
5559 * Both pages must be locked, @newpage->mapping must be set up.
5561 void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
5564 struct mem_cgroup *memcg;
5567 VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
5568 VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
5569 VM_BUG_ON_PAGE(!lrucare && PageLRU(oldpage), oldpage);
5570 VM_BUG_ON_PAGE(!lrucare && PageLRU(newpage), newpage);
5571 VM_BUG_ON_PAGE(PageAnon(oldpage) != PageAnon(newpage), newpage);
5572 VM_BUG_ON_PAGE(PageTransHuge(oldpage) != PageTransHuge(newpage),
5575 if (mem_cgroup_disabled())
5578 /* Page cache replacement: new page already charged? */
5579 if (newpage->mem_cgroup)
5583 * Swapcache readahead pages can get migrated before being
5584 * charged, and migration from compaction can happen to an
5585 * uncharged page when the PFN walker finds a page that
5586 * reclaim just put back on the LRU but has not released yet.
5588 memcg = oldpage->mem_cgroup;
5593 lock_page_lru(oldpage, &isolated);
5595 oldpage->mem_cgroup = NULL;
5598 unlock_page_lru(oldpage, isolated);
5600 commit_charge(newpage, memcg, lrucare);
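/*
 * Illustrative sketch of the calling convention documented above: the
 * caller locks both pages and sets up @newpage->mapping before handing
 * the charge over, and passes @lrucare when either page may still be on
 * the LRU:
 *
 *	lock_page(oldpage);
 *	lock_page(newpage);
 *	(set up newpage->mapping and newpage->index)
 *	mem_cgroup_migrate(oldpage, newpage, false);
 */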
5604 * subsys_initcall() for memory controller.
5606 * Some parts like hotcpu_notifier() have to be initialized from this context
5607 * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
5608 * everything that doesn't depend on a specific mem_cgroup structure should
5609 * be initialized from here.
5611 static int __init mem_cgroup_init(void)
5615 hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
5617 for_each_possible_cpu(cpu)
5618 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
5621 for_each_node(node) {
5622 struct mem_cgroup_tree_per_node *rtpn;
5625 rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL,
5626 node_online(node) ? node : NUMA_NO_NODE);
5628 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
5629 struct mem_cgroup_tree_per_zone *rtpz;
5631 rtpz = &rtpn->rb_tree_per_zone[zone];
5632 rtpz->rb_root = RB_ROOT;
5633 spin_lock_init(&rtpz->lock);
5635 soft_limit_tree.rb_tree_per_node[node] = rtpn;
5640 subsys_initcall(mem_cgroup_init);
5642 #ifdef CONFIG_MEMCG_SWAP
5644 * mem_cgroup_swapout - transfer a memsw charge to swap
5645 * @page: page whose memsw charge to transfer
5646 * @entry: swap entry to move the charge to
5648 * Transfer the memsw charge of @page to @entry.
5650 void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
5652 struct mem_cgroup *memcg;
5653 unsigned short oldid;
5655 VM_BUG_ON_PAGE(PageLRU(page), page);
5656 VM_BUG_ON_PAGE(page_count(page), page);
5658 if (!do_swap_account)
5661 memcg = page->mem_cgroup;
5663 /* Readahead page, never charged */
5667 oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg));
5668 VM_BUG_ON_PAGE(oldid, page);
5669 mem_cgroup_swap_statistics(memcg, true);
5671 page->mem_cgroup = NULL;
5673 if (!mem_cgroup_is_root(memcg))
5674 page_counter_uncharge(&memcg->memory, 1);
5677 * Interrupts should be disabled here because the caller holds the
5678 * mapping->tree_lock, which is taken with interrupts off. It is
5679 * important here to have the interrupts disabled because it is the
5680 * only synchronisation we have for updating the per-CPU variables.
5682 VM_BUG_ON(!irqs_disabled());
5683 mem_cgroup_charge_statistics(memcg, page, -1);
5684 memcg_check_events(memcg, page);
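/*
 * Illustrative sketch of the expected call site (an assumption about the
 * swap cache deletion path, consistent with the interrupts-off
 * requirement noted above):
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	mem_cgroup_swapout(page, entry);
 *	... delete the page from the swap cache ...
 *	spin_unlock_irq(&mapping->tree_lock);
 */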
5688 * mem_cgroup_uncharge_swap - uncharge a swap entry
5689 * @entry: swap entry to uncharge
5691 * Drop the memsw charge associated with @entry.
5693 void mem_cgroup_uncharge_swap(swp_entry_t entry)
5695 struct mem_cgroup *memcg;
5698 if (!do_swap_account)
5701 id = swap_cgroup_record(entry, 0);
5703 memcg = mem_cgroup_from_id(id);
5705 if (!mem_cgroup_is_root(memcg))
5706 page_counter_uncharge(&memcg->memsw, 1);
5707 mem_cgroup_swap_statistics(memcg, false);
5708 css_put(&memcg->css);
5713 /* to remember the boot option */
5714 #ifdef CONFIG_MEMCG_SWAP_ENABLED
5715 static int really_do_swap_account __initdata = 1;
5717 static int really_do_swap_account __initdata;
5720 static int __init enable_swap_account(char *s)
5722 if (!strcmp(s, "1"))
5723 really_do_swap_account = 1;
5724 else if (!strcmp(s, "0"))
5725 really_do_swap_account = 0;
5728 __setup("swapaccount=", enable_swap_account);
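/*
 * Example: booting with "swapaccount=0" on the kernel command line forces
 * really_do_swap_account to 0, so the memsw files below are never
 * registered; "swapaccount=1" enables them even when
 * CONFIG_MEMCG_SWAP_ENABLED is not set.
 */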
5730 static struct cftype memsw_cgroup_files[] = {
5732 .name = "memsw.usage_in_bytes",
5733 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
5734 .read_u64 = mem_cgroup_read_u64,
5737 .name = "memsw.max_usage_in_bytes",
5738 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
5739 .write = mem_cgroup_reset,
5740 .read_u64 = mem_cgroup_read_u64,
5743 .name = "memsw.limit_in_bytes",
5744 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
5745 .write = mem_cgroup_write,
5746 .read_u64 = mem_cgroup_read_u64,
5749 .name = "memsw.failcnt",
5750 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
5751 .write = mem_cgroup_reset,
5752 .read_u64 = mem_cgroup_read_u64,
5754 { }, /* terminate */
5757 static int __init mem_cgroup_swap_init(void)
5759 if (!mem_cgroup_disabled() && really_do_swap_account) {
5760 do_swap_account = 1;
5761 WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
5762 memsw_cgroup_files));
5766 subsys_initcall(mem_cgroup_swap_init);
5768 #endif /* CONFIG_MEMCG_SWAP */