/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/memcontrol.h>
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static LIST_HEAD(list_lrus);
static DEFINE_MUTEX(list_lrus_mutex);
static void list_lru_register(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_add(&lru->list, &list_lrus);
	mutex_unlock(&list_lrus_mutex);
}

static void list_lru_unregister(struct list_lru *lru)
{
	mutex_lock(&list_lrus_mutex);
	list_del(&lru->list);
	mutex_unlock(&list_lrus_mutex);
}
#else
static void list_lru_register(struct list_lru *lru)
{
}

static void list_lru_unregister(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	/*
	 * This needs node 0 to be always present, even
	 * in the systems supporting sparse numa ids.
	 */
	return !!lru->node[0].memcg_lrus;
}
static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	struct list_lru_memcg *memcg_lrus;
	/*
	 * Either the lock or RCU protects the array of per-cgroup lists
	 * from relocation (see memcg_update_list_lru_node). A negative
	 * idx (in particular, the -1 that memcg_cache_id() reports for
	 * the root cgroup) falls back to the per-node list.
	 */
	memcg_lrus = rcu_dereference_check(nlru->memcg_lrus,
					   lockdep_is_held(&nlru->lock));
	if (memcg_lrus && idx >= 0)
		return memcg_lrus->lru[idx];
	return &nlru->lru;
}

static __always_inline struct mem_cgroup *mem_cgroup_from_kmem(void *ptr)
{
	struct page *page;

	if (!memcg_kmem_enabled())
		return NULL;
	page = virt_to_head_page(ptr);
	return page->mem_cgroup;
}
static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	struct mem_cgroup *memcg;

	if (!nlru->memcg_lrus)
		return &nlru->lru;

	memcg = mem_cgroup_from_kmem(ptr);
	if (!memcg)
		return &nlru->lru;

	return list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
}
#else
static inline bool list_lru_memcg_aware(struct list_lru *lru)
{
	return false;
}

static inline struct list_lru_one *
list_lru_from_memcg_idx(struct list_lru_node *nlru, int idx)
{
	return &nlru->lru;
}

static inline struct list_lru_one *
list_lru_from_kmem(struct list_lru_node *nlru, void *ptr)
{
	return &nlru->lru;
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_add_tail(item, &l->list);
		l->nr_items++;
		nlru->nr_items++;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		l = list_lru_from_kmem(nlru, item);
		list_del_init(item);
		l->nr_items--;
		nlru->nr_items--;
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
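/*
 * Usage sketch (illustrative only, not from this file): a cache that wants
 * NUMA- and memcg-aware LRU behaviour embeds a list_head in each object,
 * calls list_lru_add() when the object becomes unused and list_lru_del()
 * when it is picked up again. The names my_object, my_lru and
 * my_object_unused below are hypothetical.
 *
 *	struct my_object {
 *		struct list_head lru;
 *	};
 *
 *	static struct list_lru my_lru;
 *
 *	static void my_object_unused(struct my_object *obj)
 *	{
 *		list_lru_add(&my_lru, &obj->lru);
 *	}
 */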
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate);

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);
	list->nr_items--;
}
EXPORT_SYMBOL_GPL(list_lru_isolate_move);
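/*
 * Illustrative walk callback (hypothetical, not from this file): callbacks
 * of type list_lru_walk_cb run with the per-node lock held and report what
 * happened to each item via enum lru_status. The helpers above are how a
 * callback detaches an item while keeping nr_items consistent; here,
 * object_is_busy() is a stand-in for a real-world check.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *list,
 *					  spinlock_t *lock, void *cb_arg)
 *	{
 *		struct list_head *freeable = cb_arg;
 *
 *		if (object_is_busy(item))
 *			return LRU_ROTATE;
 *		list_lru_isolate_move(list, item, freeable);
 *		return LRU_REMOVED;
 *	}
 */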
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	unsigned long count;

	rcu_read_lock();
	l = list_lru_from_memcg_idx(nlru, memcg_cache_id(memcg));
	count = l->nr_items;
	rcu_read_unlock();

	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count_one);
unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
	struct list_lru_node *nlru;

	nlru = &lru->node[nid];
	return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
static unsigned long
__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
		    list_lru_walk_cb isolate, void *cb_arg,
		    unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_lru_one *l;
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
	l = list_lru_from_memcg_idx(nlru, memcg_idx);
restart:
	list_for_each_safe(item, n, &l->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, l, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED_RETRY:
			assert_spin_locked(&nlru->lock);
			/* fall through */
		case LRU_REMOVED:
			isolated++;
			nlru->nr_items--;
			/*
			 * If the lru lock has been dropped, our list
			 * traversal is now invalid and so we have to
			 * restart from scratch.
			 */
			if (ret == LRU_REMOVED_RETRY)
				goto restart;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &l->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			assert_spin_locked(&nlru->lock);
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}
unsigned long
list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
		  list_lru_walk_cb isolate, void *cb_arg,
		  unsigned long *nr_to_walk)
{
	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
				   isolate, cb_arg, nr_to_walk);
}
EXPORT_SYMBOL_GPL(list_lru_walk_one);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk)
{
	long isolated = 0;
	int memcg_idx;

	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
					nr_to_walk);
	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
		for_each_memcg_cache_index(memcg_idx) {
			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
						isolate, cb_arg, nr_to_walk);
			if (*nr_to_walk <= 0)
				break;
		}
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
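/*
 * Illustrative use from a shrinker (hypothetical names): a scan_objects
 * implementation typically drains up to nr_to_walk entries onto a private
 * list under the lru lock, then disposes of them outside it. my_lru,
 * my_isolate and dispose_list() are stand-ins.
 *
 *	unsigned long nr_to_walk = sc->nr_to_scan;
 *	LIST_HEAD(freeable);
 *	unsigned long freed;
 *
 *	freed = list_lru_walk_one(&my_lru, sc->nid, sc->memcg,
 *				  my_isolate, &freeable, &nr_to_walk);
 *	dispose_list(&freeable);
 */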
static void init_one_lru(struct list_lru_one *l)
{
	INIT_LIST_HEAD(&l->list);
	l->nr_items = 0;
}

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
static void __memcg_destroy_list_lru_node(struct list_lru_memcg *memcg_lrus,
					  int begin, int end)
{
	int i;

	for (i = begin; i < end; i++)
		kfree(memcg_lrus->lru[i]);
}

static int __memcg_init_list_lru_node(struct list_lru_memcg *memcg_lrus,
				      int begin, int end)
{
	int i;

	for (i = begin; i < end; i++) {
		struct list_lru_one *l;

		l = kmalloc(sizeof(struct list_lru_one), GFP_KERNEL);
		if (!l)
			goto fail;

		init_one_lru(l);
		memcg_lrus->lru[i] = l;
	}
	return 0;
fail:
	/*
	 * Slots begin..i-1 were populated; destroy takes an exclusive end
	 * bound, so pass i here (passing i - 1 would leak lru[i - 1]).
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, begin, i);
	return -ENOMEM;
}
static int memcg_init_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;
	int size = memcg_nr_cache_ids;

	memcg_lrus = kvmalloc(sizeof(*memcg_lrus) +
			      size * sizeof(void *), GFP_KERNEL);
	if (!memcg_lrus)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(memcg_lrus, 0, size)) {
		kvfree(memcg_lrus);
		return -ENOMEM;
	}
	RCU_INIT_POINTER(nlru->memcg_lrus, memcg_lrus);

	return 0;
}
static void memcg_destroy_list_lru_node(struct list_lru_node *nlru)
{
	struct list_lru_memcg *memcg_lrus;

	/*
	 * This is called when the shrinker has already been unregistered,
	 * and nobody can use it. So, there is no need to use kvfree_rcu().
	 */
	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus, true);
	__memcg_destroy_list_lru_node(memcg_lrus, 0, memcg_nr_cache_ids);
	kvfree(memcg_lrus);
}
static void kvfree_rcu(struct rcu_head *head)
{
	struct list_lru_memcg *mlru;

	mlru = container_of(head, struct list_lru_memcg, rcu);
	kvfree(mlru);
}
static int memcg_update_list_lru_node(struct list_lru_node *nlru,
				      int old_size, int new_size)
{
	struct list_lru_memcg *old, *new;

	BUG_ON(old_size > new_size);

	old = rcu_dereference_protected(nlru->memcg_lrus,
					lockdep_is_held(&list_lrus_mutex));
	new = kvmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	if (__memcg_init_list_lru_node(new, old_size, new_size)) {
		kvfree(new);
		return -ENOMEM;
	}

	memcpy(&new->lru, &old->lru, old_size * sizeof(void *));

	/*
	 * The locking below allows readers that hold nlru->lock to avoid
	 * taking rcu_read_lock (see list_lru_from_memcg_idx).
	 *
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);
	rcu_assign_pointer(nlru->memcg_lrus, new);
	spin_unlock_irq(&nlru->lock);

	call_rcu(&old->rcu, kvfree_rcu);
	return 0;
}
static void memcg_cancel_update_list_lru_node(struct list_lru_node *nlru,
					      int old_size, int new_size)
{
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = rcu_dereference_protected(nlru->memcg_lrus,
					       lockdep_is_held(&list_lrus_mutex));
	/*
	 * Do not bother shrinking the array back to the old size, because
	 * we cannot handle allocation failures here.
	 */
	__memcg_destroy_list_lru_node(memcg_lrus, old_size, new_size);
}
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	int i;

	if (!memcg_aware)
		return 0;

	for_each_node(i) {
		if (memcg_init_list_lru_node(&lru->node[i]))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;
		memcg_destroy_list_lru_node(&lru->node[i]);
	}
	return -ENOMEM;
}
static void memcg_destroy_list_lru(struct list_lru *lru)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_destroy_list_lru_node(&lru->node[i]);
}
static int memcg_update_list_lru(struct list_lru *lru,
				 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return 0;

	for_each_node(i) {
		if (memcg_update_list_lru_node(&lru->node[i],
					       old_size, new_size))
			goto fail;
	}
	return 0;
fail:
	for (i = i - 1; i >= 0; i--) {
		if (!lru->node[i].memcg_lrus)
			continue;

		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
	}
	return -ENOMEM;
}
static void memcg_cancel_update_list_lru(struct list_lru *lru,
					 int old_size, int new_size)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_cancel_update_list_lru_node(&lru->node[i],
						  old_size, new_size);
}
int memcg_update_all_list_lrus(int new_size)
{
	int ret = 0;
	struct list_lru *lru;
	int old_size = memcg_nr_cache_ids;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list) {
		ret = memcg_update_list_lru(lru, old_size, new_size);
		if (ret)
			goto fail;
	}
out:
	mutex_unlock(&list_lrus_mutex);
	return ret;
fail:
	list_for_each_entry_continue_reverse(lru, &list_lrus, list)
		memcg_cancel_update_list_lru(lru, old_size, new_size);
	goto out;
}
static void memcg_drain_list_lru_node(struct list_lru_node *nlru,
				      int src_idx, int dst_idx)
{
	struct list_lru_one *src, *dst;

	/*
	 * Since list_lru_{add,del} may be called under an IRQ-safe lock,
	 * we have to use IRQ-safe primitives here to avoid deadlock.
	 */
	spin_lock_irq(&nlru->lock);

	src = list_lru_from_memcg_idx(nlru, src_idx);
	dst = list_lru_from_memcg_idx(nlru, dst_idx);

	list_splice_init(&src->list, &dst->list);
	dst->nr_items += src->nr_items;
	src->nr_items = 0;

	spin_unlock_irq(&nlru->lock);
}
static void memcg_drain_list_lru(struct list_lru *lru,
				 int src_idx, int dst_idx)
{
	int i;

	if (!list_lru_memcg_aware(lru))
		return;

	for_each_node(i)
		memcg_drain_list_lru_node(&lru->node[i], src_idx, dst_idx);
}
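/*
 * Called when a memcg is taken offline: entries accounted to the dying
 * cgroup (src_idx) are spliced onto the surviving cgroup's lists
 * (dst_idx, typically the parent's), so they stay reachable by the
 * shrinkers instead of lingering on lists nobody will walk again.
 */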
void memcg_drain_all_list_lrus(int src_idx, int dst_idx)
{
	struct list_lru *lru;

	mutex_lock(&list_lrus_mutex);
	list_for_each_entry(lru, &list_lrus, list)
		memcg_drain_list_lru(lru, src_idx, dst_idx);
	mutex_unlock(&list_lrus_mutex);
}
#else
static int memcg_init_list_lru(struct list_lru *lru, bool memcg_aware)
{
	return 0;
}

static void memcg_destroy_list_lru(struct list_lru *lru)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key)
{
	int i;
	size_t size = sizeof(*lru->node) * nr_node_ids;
	int err = -ENOMEM;

	memcg_get_cache_ids();

	lru->node = kzalloc(size, GFP_KERNEL);
	if (!lru->node)
		goto out;

	for_each_node(i) {
		spin_lock_init(&lru->node[i].lock);
		if (key)
			lockdep_set_class(&lru->node[i].lock, key);
		init_one_lru(&lru->node[i].lru);
	}

	err = memcg_init_list_lru(lru, memcg_aware);
	if (err) {
		kfree(lru->node);
		/* Do this so a list_lru_destroy() doesn't crash: */
		lru->node = NULL;
		goto out;
	}

	list_lru_register(lru);
out:
	memcg_put_cache_ids();
	return err;
}
EXPORT_SYMBOL_GPL(__list_lru_init);
void list_lru_destroy(struct list_lru *lru)
{
	/* Already destroyed or not yet initialized? */
	if (!lru->node)
		return;

	memcg_get_cache_ids();

	list_lru_unregister(lru);

	memcg_destroy_list_lru(lru);
	kfree(lru->node);
	lru->node = NULL;

	memcg_put_cache_ids();
}
EXPORT_SYMBOL_GPL(list_lru_destroy);
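/*
 * Illustrative init/teardown (hypothetical caller): users normally reach
 * __list_lru_init() through the list_lru_init()/list_lru_init_memcg()
 * wrappers declared in <linux/list_lru.h>.
 *
 *	static struct list_lru my_lru;
 *
 *	int my_cache_init(void)
 *	{
 *		return list_lru_init_memcg(&my_lru);
 *	}
 *
 *	void my_cache_exit(void)
 *	{
 *		list_lru_destroy(&my_lru);
 *	}
 */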