// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux VM pressure
 *
 * Copyright 2012 Linaro Ltd.
 *		  Anton Vorontsov <anton.vorontsov@linaro.org>
 *
 * Based on ideas from Andrew Morton, David Rientjes, KOSAKI Motohiro,
 * Leonid Moiseichuk, Mel Gorman, Minchan Kim and Pekka Enberg.
 */

#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/eventfd.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/printk.h>
#include <linux/vmpressure.h>

/*
 * The window size (vmpressure_win) is the number of scanned pages before
 * we try to analyze the scanned/reclaimed ratio. So the window is used as
 * a rate-limit tunable for the "low" level notification, and also for
 * averaging the ratio for medium/critical levels. Using small window
 * sizes can cause a lot of false positives, but too big a window size
 * will delay the notifications.
 *
 * As the vmscan reclaimer logic works with chunks which are multiple of
 * SWAP_CLUSTER_MAX, it makes sense to use it for the window size as well.
 *
 * TODO: Make the window size depend on machine size, as we do for vmstat
 * thresholds. Currently we set it to 512 pages (2MB for 4KB pages).
 */
static const unsigned long vmpressure_win = SWAP_CLUSTER_MAX * 16;

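/*
 * A quick worked check of the arithmetic above (assuming the usual
 * SWAP_CLUSTER_MAX of 32 from <linux/swap.h>): 32 * 16 = 512 pages per
 * window, i.e. 512 * 4KB = 2MB of scanning between notifications on
 * systems with 4KB pages.
 */
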
/*
 * These thresholds are used when we account memory pressure through
 * scanned/reclaimed ratio. The current values were chosen empirically. In
 * essence, they are percents: the higher the value, the more
 * unsuccessful reclaims there were.
 */
static const unsigned int vmpressure_level_med = 60;
static const unsigned int vmpressure_level_critical = 95;

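/*
 * As a worked mapping (illustrative only, no additional thresholds):
 * a window in which roughly half of the scanned pages were reclaimed
 * yields a pressure index around 50 and is reported as "low"; an index
 * of 60..94 is "medium"; 95 and above is "critical".
 */
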
/*
 * When there are too few pages left to scan, vmpressure() may miss the
 * critical pressure as the number of pages will be less than the "window
 * size". However, in that case the vmscan priority will rise fast as the
 * reclaimer will try to scan LRUs more deeply.
 *
 * The vmscan logic considers these special priorities:
 *
 * prio == DEF_PRIORITY (12): reclaimer starts with that value
 * prio <= DEF_PRIORITY - 2 : kswapd becomes somewhat overwhelmed
 * prio == 0                : close to OOM, kernel scans every page in an lru
 *
 * Any value in this range is acceptable for this tunable (i.e. from 12 to
 * 0). The current value for vmpressure_level_critical_prio was chosen
 * empirically, but the number, in essence, means that we consider the
 * level critical when the scanning depth is ~10% of the lru size (vmscan
 * scans 'lru_size >> prio' pages, so it is actually 12.5%, or one
 * eighth).
 */
static const unsigned int vmpressure_level_critical_prio = ilog2(100 / 10);

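/*
 * A worked instance of the expression above, using the kernel's integer
 * arithmetic: 100 / 10 == 10 and ilog2(10) == 3, so pressure is treated
 * as critical once prio <= 3, at which point vmscan examines
 * lru_size >> 3 pages, i.e. one eighth (12.5%) of each LRU per pass.
 */
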
static struct vmpressure *work_to_vmpressure(struct work_struct *work)
{
	return container_of(work, struct vmpressure, work);
}

static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
	struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);

	memcg = parent_mem_cgroup(memcg);
	if (!memcg)
		return NULL;
	return memcg_to_vmpressure(memcg);
}

enum vmpressure_levels {
	VMPRESSURE_LOW = 0,
	VMPRESSURE_MEDIUM,
	VMPRESSURE_CRITICAL,
	VMPRESSURE_NUM_LEVELS,
};

enum vmpressure_modes {
	VMPRESSURE_NO_PASSTHROUGH = 0,
	VMPRESSURE_HIERARCHY,
	VMPRESSURE_LOCAL,
	VMPRESSURE_NUM_MODES,
};

static const char * const vmpressure_str_levels[] = {
	[VMPRESSURE_LOW] = "low",
	[VMPRESSURE_MEDIUM] = "medium",
	[VMPRESSURE_CRITICAL] = "critical",
};

static const char * const vmpressure_str_modes[] = {
	[VMPRESSURE_NO_PASSTHROUGH] = "default",
	[VMPRESSURE_HIERARCHY] = "hierarchy",
	[VMPRESSURE_LOCAL] = "local",
};

static enum vmpressure_levels vmpressure_level(unsigned long pressure)
{
	if (pressure >= vmpressure_level_critical)
		return VMPRESSURE_CRITICAL;
	else if (pressure >= vmpressure_level_med)
		return VMPRESSURE_MEDIUM;
	return VMPRESSURE_LOW;
}

static enum vmpressure_levels vmpressure_calc_level(unsigned long scanned,
						    unsigned long reclaimed)
{
	unsigned long scale = scanned + reclaimed;
	unsigned long pressure = 0;

	/*
	 * reclaimed can be greater than scanned for things such as reclaimed
	 * slab pages. shrink_node() just adds reclaimed pages without a
	 * related increment to scanned pages.
	 */
	if (reclaimed >= scanned)
		goto out;
	/*
	 * We calculate the ratio (in percents) of how many pages were
	 * scanned vs. reclaimed in a given time frame (window). Note that
	 * time is in VM reclaimer's "ticks", i.e. number of pages
	 * scanned. This makes it possible to set desired reaction time
	 * and serves as a ratelimit.
	 */
	pressure = scale - (reclaimed * scale / scanned);
	pressure = pressure * 100 / scale;

out:
	pr_debug("%s: %3lu  (s: %lu  r: %lu)\n", __func__, pressure,
		 scanned, reclaimed);

	return vmpressure_level(pressure);
}

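/*
 * A worked instance of the formula above (numbers are illustrative):
 * with scanned = 100 and reclaimed = 30, scale = 130,
 * pressure = 130 - (30 * 130 / 100) = 91, then 91 * 100 / 130 = 70.
 * Algebraically this reduces to 100 * (scanned - reclaimed) / scanned:
 * 70% of the scanned pages went unreclaimed, so the window is reported
 * as "medium".
 */
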
struct vmpressure_event {
	struct eventfd_ctx *efd;
	enum vmpressure_levels level;
	enum vmpressure_modes mode;
	struct list_head node;
};

static bool vmpressure_event(struct vmpressure *vmpr,
			     const enum vmpressure_levels level,
			     bool ancestor, bool signalled)
{
	struct vmpressure_event *ev;
	bool ret = false;

	mutex_lock(&vmpr->events_lock);
	list_for_each_entry(ev, &vmpr->events, node) {
		if (ancestor && ev->mode == VMPRESSURE_LOCAL)
			continue;
		if (signalled && ev->mode == VMPRESSURE_NO_PASSTHROUGH)
			continue;
		if (level < ev->level)
			continue;
		eventfd_signal(ev->efd, 1);
		ret = true;
	}
	mutex_unlock(&vmpr->events_lock);

	return ret;
}

static void vmpressure_work_fn(struct work_struct *work)
{
	struct vmpressure *vmpr = work_to_vmpressure(work);
	unsigned long scanned;
	unsigned long reclaimed;
	enum vmpressure_levels level;
	bool ancestor = false;
	bool signalled = false;

	spin_lock(&vmpr->sr_lock);
	/*
	 * Several contexts might be calling vmpressure(), so it is
	 * possible that the work was rescheduled again before the old
	 * work context cleared the counters. In that case we will run
	 * just after the old work returns, but then scanned might be zero
	 * here. No need for any locks here since we don't care if
	 * vmpr->reclaimed is in sync.
	 */
	scanned = vmpr->tree_scanned;
	if (!scanned) {
		spin_unlock(&vmpr->sr_lock);
		return;
	}

	reclaimed = vmpr->tree_reclaimed;
	vmpr->tree_scanned = 0;
	vmpr->tree_reclaimed = 0;
	spin_unlock(&vmpr->sr_lock);

	level = vmpressure_calc_level(scanned, reclaimed);

	do {
		if (vmpressure_event(vmpr, level, ancestor, signalled))
			signalled = true;
		ancestor = true;
	} while ((vmpr = vmpressure_parent(vmpr)));
}

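/*
 * To illustrate the loop above (an explanatory sketch of the existing
 * semantics, not extra logic): for pressure detected in cgroup a/b/c,
 * vmpressure_event() runs on c first with ancestor == false, then on b
 * and a with ancestor == true, so "local" listeners on b and a are
 * skipped. Once any level fires an eventfd, signalled becomes true and
 * "default" (no-passthrough) listeners further up the hierarchy are
 * skipped as well; "hierarchy" listeners are always considered.
 */
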
/**
 * vmpressure() - Account memory pressure through scanned/reclaimed ratio
 * @gfp:	reclaimer's gfp mask
 * @memcg:	cgroup memory controller handle
 * @tree:	legacy subtree mode
 * @scanned:	number of pages scanned
 * @reclaimed:	number of pages reclaimed
 *
 * This function should be called from the vmscan reclaim path to account
 * "instantaneous" memory pressure (scanned/reclaimed ratio). The raw
 * pressure index is then further refined and averaged over time.
 *
 * If @tree is set, vmpressure is in traditional userspace reporting
 * mode: @memcg is considered the pressure root and userspace is
 * notified of the entire subtree's reclaim efficiency.
 *
 * If @tree is not set, reclaim efficiency is recorded for @memcg, and
 * only in-kernel users are notified.
 *
 * This function does not return any value.
 */
void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
		unsigned long scanned, unsigned long reclaimed)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);

	/*
	 * Here we only want to account pressure that userland is able to
	 * help us with. For example, suppose that DMA zone is under
	 * pressure; if we notify userland about that kind of pressure,
	 * then it will be mostly a waste as it will trigger unnecessary
	 * freeing of memory by userland (since userland is more likely to
	 * have HIGHMEM/MOVABLE pages instead of the DMA fallback). That
	 * is why we include only movable, highmem and FS/IO pages.
	 * Indirect reclaim (kswapd) sets sc->gfp_mask to GFP_KERNEL, so
	 * we account it too.
	 */
	if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS)))
		return;

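	/*
	 * For instance (illustrative, not extra logic): a GFP_KERNEL
	 * allocation carries __GFP_IO and __GFP_FS and is accounted here,
	 * while a GFP_NOIO allocation carries none of the four flags
	 * above and is ignored.
	 */
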
	/*
	 * If we got here with no pages scanned, then that is an indicator
	 * that the reclaimer was unable to find any shrinkable LRUs at the
	 * current scanning depth. But it does not mean that we should
	 * report the critical pressure, yet. If the scanning priority
	 * (scanning depth) goes too high (deep), we will be notified
	 * through vmpressure_prio(). But so far, keep calm.
	 */
	if (!scanned)
		return;

	if (tree) {
		spin_lock(&vmpr->sr_lock);
		scanned = vmpr->tree_scanned += scanned;
		vmpr->tree_reclaimed += reclaimed;
		spin_unlock(&vmpr->sr_lock);

		if (scanned < vmpressure_win)
			return;
		schedule_work(&vmpr->work);
	} else {
		enum vmpressure_levels level;

		/* For now, no users for root-level efficiency */
		if (!memcg || memcg == root_mem_cgroup)
			return;

		spin_lock(&vmpr->sr_lock);
		scanned = vmpr->scanned += scanned;
		reclaimed = vmpr->reclaimed += reclaimed;
		if (scanned < vmpressure_win) {
			spin_unlock(&vmpr->sr_lock);
			return;
		}
		vmpr->scanned = vmpr->reclaimed = 0;
		spin_unlock(&vmpr->sr_lock);

		level = vmpressure_calc_level(scanned, reclaimed);

		if (level > VMPRESSURE_LOW) {
			/*
			 * Let the socket buffer allocator know that
			 * we are having trouble reclaiming LRU pages.
			 *
			 * For hysteresis, keep the pressure state
			 * asserted for a second, during which subsequent
			 * pressure events can occur.
			 */
			memcg->socket_pressure = jiffies + HZ;
		}
	}
}

/**
 * vmpressure_prio() - Account memory pressure through reclaimer priority level
 * @gfp:	reclaimer's gfp mask
 * @memcg:	cgroup memory controller handle
 * @prio:	reclaimer's priority
 *
 * This function should be called from the reclaim path every time the
 * vmscan's reclaiming priority (scanning depth) changes.
 *
 * This function does not return any value.
 */
void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
{
	/*
	 * We only use prio for accounting the critical level. For more
	 * info see the comment for the vmpressure_level_critical_prio
	 * variable above.
	 */
	if (prio > vmpressure_level_critical_prio)
		return;

	/*
	 * OK, the prio is below the threshold, so update the vmpressure
	 * information before the shrinker dives into a long run of deep
	 * vmscan shrinking. Passing scanned = vmpressure_win,
	 * reclaimed = 0 to vmpressure() basically means that we signal
	 * the 'critical' level.
	 */
	vmpressure(gfp, memcg, true, vmpressure_win, 0);
}

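/*
 * Plugging scanned = vmpressure_win, reclaimed = 0 into
 * vmpressure_calc_level() shows why this forces a "critical" event:
 * scale = scanned, pressure = scale - 0 = scale, and
 * scale * 100 / scale = 100, which is >= vmpressure_level_critical (95).
 */
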
#define MAX_VMPRESSURE_ARGS_LEN	(strlen("critical") + strlen("hierarchy") + 2)

/**
 * vmpressure_register_event() - Bind vmpressure notifications to an eventfd
 * @memcg:	memcg that is interested in vmpressure notifications
 * @eventfd:	eventfd context to link notifications with
 * @args:	event arguments (pressure level threshold, optional mode)
 *
 * This function associates eventfd context with the vmpressure
 * infrastructure, so that the notifications will be delivered to the
 * @eventfd. The @args parameter is a comma-delimited string that denotes a
 * pressure level threshold (one of vmpressure_str_levels, i.e. "low",
 * "medium", or "critical") and an optional mode (one of
 * vmpressure_str_modes, i.e. "hierarchy" or "local").
 *
 * To be used as memcg event method.
 *
 * Return: 0 on success, -ENOMEM on memory failure or -EINVAL if @args could
 * not be parsed.
 */
int vmpressure_register_event(struct mem_cgroup *memcg,
			      struct eventfd_ctx *eventfd, const char *args)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
	struct vmpressure_event *ev;
	enum vmpressure_modes mode = VMPRESSURE_NO_PASSTHROUGH;
	enum vmpressure_levels level;
	char *spec, *spec_orig;
	char *token;
	int ret = 0;

	spec_orig = spec = kstrndup(args, MAX_VMPRESSURE_ARGS_LEN, GFP_KERNEL);
	if (!spec) {
		ret = -ENOMEM;
		goto out;
	}

	/* Find required level */
	token = strsep(&spec, ",");
	ret = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
	if (ret < 0)
		goto out;
	level = ret;

	/* Find optional mode */
	token = strsep(&spec, ",");
	if (token) {
		ret = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
		if (ret < 0)
			goto out;
		mode = ret;
	}

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev) {
		ret = -ENOMEM;
		goto out;
	}

	ev->efd = eventfd;
	ev->level = level;
	ev->mode = mode;

	mutex_lock(&vmpr->events_lock);
	list_add(&ev->node, &vmpr->events);
	mutex_unlock(&vmpr->events_lock);
	ret = 0;
out:
	kfree(spec_orig);
	return ret;
}

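/*
 * A sketch of how userspace typically reaches this function via the
 * cgroup v1 event interface (paths, the "foo" group and error handling
 * are illustrative):
 *
 *	int efd = eventfd(0, 0);
 *	int lfd = open("/sys/fs/cgroup/memory/foo/memory.pressure_level",
 *		       O_RDONLY);
 *	int cfd = open("/sys/fs/cgroup/memory/foo/cgroup.event_control",
 *		       O_WRONLY);
 *	char line[64];
 *	uint64_t cnt;
 *
 *	snprintf(line, sizeof(line), "%d %d medium,hierarchy", efd, lfd);
 *	write(cfd, line, strlen(line));	/* ends up here, via memcg */
 *	read(efd, &cnt, sizeof(cnt));	/* blocks until an event fires */
 */
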
/**
 * vmpressure_unregister_event() - Unbind eventfd from vmpressure
 * @memcg:	memcg handle
 * @eventfd:	eventfd context that was used to link vmpressure with the @memcg
 *
 * This function does internal manipulations to detach the @eventfd from
 * the vmpressure notifications, and then frees internal resources
 * associated with the @eventfd (but the @eventfd itself is not freed).
 *
 * To be used as memcg event method.
 */
void vmpressure_unregister_event(struct mem_cgroup *memcg,
				 struct eventfd_ctx *eventfd)
{
	struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
	struct vmpressure_event *ev;

	mutex_lock(&vmpr->events_lock);
	list_for_each_entry(ev, &vmpr->events, node) {
		if (ev->efd != eventfd)
			continue;
		list_del(&ev->node);
		kfree(ev);
		break;
	}
	mutex_unlock(&vmpr->events_lock);
}

/**
 * vmpressure_init() - Initialize vmpressure control structure
 * @vmpr:	Structure to be initialized
 *
 * This function should be called on every allocated vmpressure structure
 * before any usage.
 */
void vmpressure_init(struct vmpressure *vmpr)
{
	spin_lock_init(&vmpr->sr_lock);
	mutex_init(&vmpr->events_lock);
	INIT_LIST_HEAD(&vmpr->events);
	INIT_WORK(&vmpr->work, vmpressure_work_fn);
}

/**
 * vmpressure_cleanup() - shuts down vmpressure control structure
 * @vmpr:	Structure to be cleaned up
 *
 * This function should be called before the structure in which it is
 * embedded is cleaned up.
 */
void vmpressure_cleanup(struct vmpressure *vmpr)
{
	/*
	 * Make sure there is no pending work before eventfd infrastructure
	 * goes away.
	 */
	flush_work(&vmpr->work);
}