/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2015 Intel Corporation
 */

#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>

#include "gt/intel_gt_requests.h"

#include "dma_resv_utils.h"
#include "i915_trace.h"

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/*
	 * We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
			      unsigned long shrink, bool trylock_vm)
{
	unsigned long flags;

	flags = 0;
	if (shrink & I915_SHRINK_ACTIVE)
		flags |= I915_GEM_OBJECT_UNBIND_ACTIVE;
	if (!(shrink & I915_SHRINK_BOUND))
		flags |= I915_GEM_OBJECT_UNBIND_TEST;
	if (trylock_vm)
		flags |= I915_GEM_OBJECT_UNBIND_VM_TRYLOCK;

	if (i915_gem_object_unbind(obj, flags) == 0)
		return true;

	return false;
}
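
/*
 * Example (illustrative only, using the flags handled above): a caller
 * reclaiming bound and unbound objects under memory pressure, without
 * touching active ones and without the vm trylock workaround, reaches
 * this helper roughly as
 *
 *	unsafe_drop_pages(obj, I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
 *			  false);
 *
 * which boils down to i915_gem_object_unbind(obj, 0): only idle VMAs are
 * unbound, and the helper only reports success if every binding of the
 * object could be removed.
 */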

static void try_to_writeback(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return;
	}

	if (flags & I915_SHRINK_WRITEBACK)
		i915_gem_object_writeback(obj);
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @ww: i915 gem ww acquire ctx, or NULL
 * @i915: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @shrink: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @shrink. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps), or the mm core might have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker code
 * having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct i915_gem_ww_ctx *ww,
		struct drm_i915_private *i915,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned int shrink)
{
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &i915->mm.purge_list, ~0u },
		{
			&i915->mm.shrink_list,
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND
		},
		{ NULL, 0 },
	}, *phase;
	intel_wakeref_t wakeref = 0;
	unsigned long count = 0;
	unsigned long scanned = 0;
	int err = 0;

	/* CHV + VTD workaround use stop_machine(); need to trylock vm->mutex */
	bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);

	trace_i915_gem_shrink(i915, target, shrink);

	/*
	 * Unbinding of objects will require HW access; Let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if (shrink & I915_SHRINK_BOUND) {
		wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
		if (!wakeref)
			shrink &= ~I915_SHRINK_BOUND;
	}

	/*
	 * When shrinking the active list, we should also consider active
	 * contexts. Active contexts are pinned until they are retired, and
	 * so can not be simply unbound to retire and unpin their pages. To
	 * shrink the contexts, we must wait until the gpu is idle and
	 * completed its switch to the kernel context. In short, we do
	 * not have a good mechanism for idling a specific context, but
	 * what we can do is give them a kick so that we do not keep idle
	 * contexts around longer than is necessary.
	 */
	if (shrink & I915_SHRINK_ACTIVE)
		/* Retire requests to unpin all idle contexts */
		intel_gt_retire_requests(&i915->gt);

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equal to 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;
		unsigned long flags;

		if ((shrink & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);

		/*
		 * We serialize our access to unreferenced objects through
		 * the use of the struct_mutex. While the objects are not
		 * yet freed (due to RCU then a workqueue) we still want
		 * to be able to shrink their pages, so they remain on
		 * the unbound/bound list until actually freed.
		 */
		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);

			if (shrink & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(shrink & I915_SHRINK_ACTIVE) &&
			    i915_gem_object_is_framebuffer(obj))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			err = 0;
			if (unsafe_drop_pages(obj, shrink, trylock_vm)) {
				/* May arrive from get_pages on another bo */
				if (!ww) {
					if (!i915_gem_object_trylock(obj))
						goto skip;
				} else {
					err = i915_gem_object_lock(obj, ww);
					if (err)
						goto skip;
				}

				if (!__i915_gem_object_put_pages(obj)) {
					try_to_writeback(obj, shrink);
					count += obj->base.size >> PAGE_SHIFT;
				}
				if (!ww)
					i915_gem_object_unlock(obj);
			}

			dma_resv_prune(obj->base.resv);

			scanned += obj->base.size >> PAGE_SHIFT;
skip:
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
			if (err)
				break;
		}
		list_splice_tail(&still_in_list, phase->list);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
		if (err)
			break;
	}

	if (shrink & I915_SHRINK_BOUND)
		intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	if (nr_scanned)
		*nr_scanned += scanned;
	return count;
}
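
/*
 * Example (illustrative, mirroring the callers below): reclaiming up to
 * 128 pages of purgeable or idle backing storage without a ww context:
 *
 *	unsigned long nr_scanned = 0;
 *	unsigned long freed;
 *
 *	freed = i915_gem_shrink(NULL, i915, 128, &nr_scanned,
 *				I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
 *
 * Here freed counts pages whose backing store was actually released,
 * while nr_scanned counts every page that was considered, which is the
 * split the core shrinker expects via struct shrink_control.
 */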

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @i915: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code to intentionally quiesce the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	unsigned long freed = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		freed = i915_gem_shrink(NULL, i915, -1UL, NULL,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND);
	}

	return freed;
}
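
/*
 * Note (illustrative): the -1UL target above simply means "as much as
 * possible" - the loop in i915_gem_shrink() only stops once it has freed
 * at least @target pages, so it ends up walking the whole shrink list.
 * The oom and vmap notifiers below rely on the same convention.
 */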

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long num_objects;
	unsigned long count;

	count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
	num_objects = READ_ONCE(i915->mm.shrink_count);

	/*
	 * Update our preferred vmscan batch size for the next pass.
	 * Our rough guess for an effective batch size is roughly 2
	 * available GEM objects worth of pages. That is we don't want
	 * the shrinker to fire, until it is worth the cost of freeing an
	 * entire GEM object.
	 */
	if (num_objects) {
		unsigned long avg = 2 * count / num_objects;

		i915->mm.shrinker.batch =
			max((i915->mm.shrinker.batch + avg) >> 1,
			    128ul /* default SHRINK_BATCH */);
	}

	return count;
}
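
/*
 * Worked example for the batch heuristic above (illustrative numbers):
 * with 256 shrinkable objects covering 1 GiB, count = 262144 pages and
 * avg = 2 * 262144 / 256 = 2048, so shrinker.batch converges towards
 * roughly 2048 pages via the running average, and never drops below the
 * 128 page default SHRINK_BATCH.
 */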

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long freed;

	sc->nr_scanned = 0;

	freed = i915_gem_shrink(NULL, i915,
				sc->nr_to_scan,
				&sc->nr_scanned,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND);
	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
			freed += i915_gem_shrink(NULL, i915,
						 sc->nr_to_scan - sc->nr_scanned,
						 &sc->nr_scanned,
						 I915_SHRINK_ACTIVE |
						 I915_SHRINK_BOUND |
						 I915_SHRINK_UNBOUND |
						 I915_SHRINK_WRITEBACK);
		}
	}

	return sc->nr_scanned ? freed : SHRINK_STOP;
}
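
/*
 * Sketch of the scan policy above (descriptive, not new behaviour): the
 * first pass is cheap - it only considers bound/unbound objects and will
 * not wake the device. Only if that pass did not cover sc->nr_to_scan
 * and we are running as kswapd do we take a runtime-pm wakeref and also
 * allow I915_SHRINK_ACTIVE and I915_SHRINK_WRITEBACK. Returning
 * SHRINK_STOP when nothing was scanned tells the core to leave us alone
 * for the rest of this reclaim round.
 */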

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, available, freed_pages;
	intel_wakeref_t wakeref;
	unsigned long flags;

	freed_pages = 0;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_WRITEBACK);

	/*
	 * Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	available = unevictable = 0;
	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			available += obj->base.size >> PAGE_SHIFT;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	if (freed_pages || available)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned, %lu pages left available.\n",
			freed_pages, unevictable, available);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_VMAPS);

	/* We also want to clear any cached iomaps as they wrap vmap */
	mutex_lock(&i915->ggtt.vm.mutex);
	list_for_each_entry_safe(vma, next,
				 &i915->ggtt.vm.bound_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;

		if (!vma->iomap || i915_vma_is_active(vma))
			continue;

		if (__i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}
	mutex_unlock(&i915->ggtt.vm.mutex);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
	i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
	i915->mm.shrinker.seeks = DEFAULT_SEEKS;
	i915->mm.shrinker.batch = 4096;
	drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker));

	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));

	i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	drm_WARN_ON(&i915->drm,
		    register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}
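
/*
 * Rough sketch (core mm pseudo-code, not i915 code) of how the hooks
 * registered above are driven once memory pressure builds up:
 *
 *	struct shrink_control sc = { .gfp_mask = GFP_KERNEL };
 *	unsigned long freeable;
 *
 *	freeable = i915_gem_shrinker_count(&i915->mm.shrinker, &sc);
 *	if (freeable) {
 *		sc.nr_to_scan = min(freeable, i915->mm.shrinker.batch);
 *		i915_gem_shrinker_scan(&i915->mm.shrinker, &sc);
 *	}
 *
 * In reality do_shrink_slab() also applies .seeks and the vmscan priority
 * when deciding how much to scan; the batch value set above only caps
 * what each individual ->scan_objects() call is asked to do.
 */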

void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
	drm_WARN_ON(&i915->drm,
		    unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
	drm_WARN_ON(&i915->drm,
		    unregister_oom_notifier(&i915->mm.oom_notifier));
	unregister_shrinker(&i915->mm.shrinker);
}

void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
				    struct mutex *mutex)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

	fs_reclaim_acquire(GFP_KERNEL);

	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
	mutex_release(&mutex->dep_map, _RET_IP_);

	fs_reclaim_release(GFP_KERNEL);
}
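
/*
 * Minimal usage sketch (my_lock is a hypothetical driver mutex, not an
 * existing one): calling
 *
 *	i915_gem_shrinker_taints_mutex(i915, &my_lock);
 *
 * once at init time records the "fs_reclaim -> my_lock" dependency in
 * lockdep, because the shrinker may take my_lock from within reclaim.
 * Any later GFP_KERNEL allocation made while holding my_lock then
 * completes the inverse dependency and is reported immediately, rather
 * than only when reclaim happens to race with that code path at runtime.
 */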

#define obj_to_i915(obj__) to_i915((obj__)->base.dev)

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	/*
	 * We can only be called while the pages are pinned or when
	 * the pages are released. If pinned, we should only be called
	 * from a single caller under controlled conditions; and on release
	 * only one caller may release us. The two cases must not overlap.
	 */
	if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
	    !list_empty(&obj->mm.link)) {
		list_del_init(&obj->mm.link);
		i915->mm.shrink_count--;
		i915->mm.shrink_memory -= obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
					      struct list_head *head)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	if (!i915_gem_object_is_shrinkable(obj))
		return;

	if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	GEM_BUG_ON(!kref_read(&obj->base.refcount));
	if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
		GEM_BUG_ON(!list_empty(&obj->mm.link));

		list_add_tail(&obj->mm.link, head);
		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_make_shrinkable(obj,
					  &obj_to_i915(obj)->mm.shrink_list);
}

void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_make_shrinkable(obj,
					  &obj_to_i915(obj)->mm.purge_list);
}
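
/*
 * Illustrative lifecycle of the helpers above (no new API implied): while
 * an object's pages are pinned for use, i915_gem_object_make_unshrinkable()
 * removes it from the shrink lists; once the pin is dropped it is handed
 * back to the shrinker with either
 *
 *	i915_gem_object_make_shrinkable(obj);	(normal, swappable pages)
 *	i915_gem_object_make_purgeable(obj);	(I915_MADV_DONTNEED pages)
 *
 * The purge_list is walked first by i915_gem_shrink(), so purgeable
 * objects are reclaimed before anything has to be pushed out to swap.
 */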