/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>

struct mem_cgroup;

/* list_lru_walk_cb must always return one of these values */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_REMOVED_RETRY,	/* item removed, but lock has been
				   dropped and reacquired */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
};

struct list_lru_one {
	struct list_head	list;
	/* may become negative during memcg reparenting */
	long			nr_items;
};

struct list_lru_per_memcg {
	struct rcu_head		rcu;
	/* array of per cgroup per node lists, indexed by node id */
	struct list_lru_one	node[];
};

struct list_lru_memcg {
	struct rcu_head		rcu;
	/* array of per cgroup lists, indexed by memcg_cache_id */
	struct list_lru_per_memcg __rcu	*mlru[];
};

struct list_lru_node {
	/* protects all lists on the node, including per cgroup */
	spinlock_t		lock;
	/* global list, used for the root cgroup in cgroup aware lrus */
	struct list_lru_one	lru;
	long			nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
	struct list_lru_node	*node;
#ifdef CONFIG_MEMCG_KMEM
	struct list_head	list;
	int			shrinker_id;
	bool			memcg_aware;
	/* protects ->mlrus->mlru[i] */
	spinlock_t		lock;
	/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
	struct list_lru_memcg	__rcu *mlrus;
#endif
};

void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker);

#define list_lru_init(lru)				\
	__list_lru_init((lru), false, NULL, NULL)
#define list_lru_init_key(lru, key)			\
	__list_lru_init((lru), false, (key), NULL)
#define list_lru_init_memcg(lru, shrinker)		\
	__list_lru_init((lru), true, NULL, shrinker)

int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
			 gfp_t gfp);
int memcg_update_all_list_lrus(int num_memcgs);
void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent);

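/*
 * Example (illustrative sketch, not part of this header): typical
 * lifecycle of a plain, non-cgroup-aware lru. The identifiers my_lru,
 * my_cache_init and my_cache_exit are hypothetical.
 *
 *	static struct list_lru my_lru;
 *
 *	static int __init my_cache_init(void)
 *	{
 *		return list_lru_init(&my_lru);
 *	}
 *
 *	static void __exit my_cache_exit(void)
 *	{
 *		list_lru_destroy(&my_lru);
 *	}
 */
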
/**
 * list_lru_add: add an element to the lru list's tail
 * @list_lru: the lru pointer
 * @item: the item to be added.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. The caller therefore does not need to track whether the
 * element is already on the list and may update it lazily. Note, however,
 * that this holds for *a* list, not *this* list: if the caller keeps
 * elements on more than one type of list, it is up to the caller to fully
 * remove the item from the previous list (with list_lru_del(), for
 * instance) before moving it to @list_lru.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_del: delete an element from the lru list
 * @list_lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments about an element already belonging to
 * a list apply to list_lru_del() as well.
 *
 * Return value: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item);

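/*
 * Example (illustrative sketch): objects embed a list_head and are put
 * on the lru when they become idle; because duplicate adds are a no-op,
 * the caller does not have to remember whether the object is already
 * there. struct my_object and my_object_is_idle() are hypothetical.
 *
 *	struct my_object {
 *		struct list_head lru;
 *	};
 *
 *	static void my_object_put(struct my_object *obj)
 *	{
 *		if (my_object_is_idle(obj))
 *			list_lru_add(&my_lru, &obj->lru);
 *	}
 *
 *	static void my_object_reuse(struct my_object *obj)
 *	{
 *		list_lru_del(&my_lru, &obj->lru);
 *	}
 */
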
/**
 * list_lru_count_one: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 * @memcg: the cgroup to count from.
 *
 * Always returns a non-negative number, 0 for empty lists. There is no
 * guarantee that the list is not updated while the count is being computed.
 * Callers that want such a guarantee need to provide an outer lock.
 */
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);

static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
						  struct shrink_control *sc)
{
	return list_lru_count_one(lru, sc->nid, sc->memcg);
}

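/*
 * Example (illustrative sketch): a shrinker's ->count_objects hook
 * usually just forwards to list_lru_shrink_count(). my_lru and my_count
 * are hypothetical.
 *
 *	static unsigned long my_count(struct shrinker *shrink,
 *				      struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&my_lru, sc);
 *	}
 */
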
static inline unsigned long list_lru_count(struct list_lru *lru)
{
	long count = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);

	return count;
}

void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head);

typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);

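/*
 * Example (illustrative sketch): a minimal walk callback that moves
 * every item it is handed onto a caller-supplied dispose list. A real
 * callback would typically also check reference counts and return
 * LRU_ROTATE, LRU_SKIP or LRU_RETRY when an item cannot be isolated
 * right away; my_isolate is hypothetical.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *			struct list_lru_one *list, spinlock_t *lock,
 *			void *cb_arg)
 *	{
 *		struct list_head *dispose = cb_arg;
 *
 *		list_lru_isolate_move(list, item, dispose);
 *		return LRU_REMOVED;
 *	}
 */
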
/**
 * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular list_lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque argument. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback will return an enum lru_status telling the list_lru
 * infrastructure what to do with the object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return value: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_one(struct list_lru *lru,
				int nid, struct mem_cgroup *memcg,
				list_lru_walk_cb isolate, void *cb_arg,
				unsigned long *nr_to_walk);

/**
 * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque type that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * Same as list_lru_walk_one() except that the spinlock is acquired with
 * spin_lock_irq().
 */
unsigned long list_lru_walk_one_irq(struct list_lru *lru,
				    int nid, struct mem_cgroup *memcg,
				    list_lru_walk_cb isolate, void *cb_arg,
				    unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk);

static inline unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
		     list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
				 &sc->nr_to_scan);
}

static inline unsigned long
list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
			 list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
				     &sc->nr_to_scan);
}

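/*
 * Example (illustrative sketch): a shrinker's ->scan_objects hook built
 * on list_lru_shrink_walk(), pairing with the my_count example above.
 * Items are isolated onto a local list under the lru lock and freed
 * afterwards; my_scan and my_object_free() are hypothetical.
 *
 *	static unsigned long my_scan(struct shrinker *shrink,
 *				     struct shrink_control *sc)
 *	{
 *		LIST_HEAD(dispose);
 *		unsigned long freed;
 *
 *		freed = list_lru_shrink_walk(&my_lru, sc, my_isolate,
 *					     &dispose);
 *		while (!list_empty(&dispose)) {
 *			struct my_object *obj = list_first_entry(&dispose,
 *						struct my_object, lru);
 *
 *			list_del_init(&obj->lru);
 *			my_object_free(obj);
 *		}
 *		return freed;
 *	}
 */
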
static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
	      void *cb_arg, unsigned long nr_to_walk)
{
	long isolated = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		isolated += list_lru_walk_node(lru, nid, isolate,
					       cb_arg, &nr_to_walk);
		if (nr_to_walk <= 0)
			break;
	}
	return isolated;
}

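/*
 * Example (illustrative sketch): at teardown, all remaining objects can
 * be drained by walking every node until the lru is empty, reusing the
 * hypothetical my_isolate callback and then emptying &dispose exactly
 * as in my_scan() above.
 *
 *	LIST_HEAD(dispose);
 *
 *	while (list_lru_count(&my_lru))
 *		list_lru_walk(&my_lru, my_isolate, &dispose, ULONG_MAX);
 */
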
#endif /* _LRU_LIST_H */