/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/i915_drm.h>

#include "i915_trace.h"

static bool shrinker_lock(struct drm_i915_private *i915,
                          unsigned int flags,
                          bool *unlock)
{
        struct mutex *m = &i915->drm.struct_mutex;

        switch (mutex_trylock_recursive(m)) {
        case MUTEX_TRYLOCK_RECURSIVE:
                *unlock = false;
                return true;

        case MUTEX_TRYLOCK_FAILED:
                *unlock = false;
                if (flags & I915_SHRINK_ACTIVE &&
                    mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0)
                        *unlock = true;
                return *unlock;

        case MUTEX_TRYLOCK_SUCCESS:
                *unlock = true;
                return true;
        }

        BUG();
}

static void shrinker_unlock(struct drm_i915_private *i915, bool unlock)
{
        if (!unlock)
                return;

        mutex_unlock(&i915->drm.struct_mutex);
}

static bool swap_available(void)
{
        return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
        /* Consider only shrinkable objects. */
        if (!i915_gem_object_is_shrinkable(obj))
                return false;

        /* Only report true if by unbinding the object and putting its pages
         * we can actually make forward progress towards freeing physical
         * pages.
         *
         * If the pages are pinned for any other reason than being bound
         * to the GPU, simply unbinding from the GPU is not going to succeed
         * in releasing our pin count on the pages themselves.
         */
        if (atomic_read(&obj->mm.pages_pin_count) > obj->bind_count)
                return false;

        /* If any vma are "permanently" pinned, it will prevent us from
         * reclaiming the obj->mm.pages. We only allow scanout objects to claim
         * a permanent pin, along with a few others like the context objects.
         * To simplify the scan, and to avoid walking the list of vma under the
         * object, we just check for its permanent (global) pin.
         */
        if (READ_ONCE(obj->pin_global))
                return false;

        /* We can only return physical pages to the system if we can either
         * discard the contents (because the user has marked them as being
         * purgeable) or if we can move their contents out to swap.
         */
        return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

static bool unsafe_drop_pages(struct drm_i915_gem_object *obj)
{
        if (i915_gem_object_unbind(obj) == 0)
                __i915_gem_object_put_pages(obj, I915_MM_SHRINKER);
        return !i915_gem_object_has_pages(obj);
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @i915: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @flags: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @flags. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
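 *
 * For example, i915_gem_shrinker_scan() below first calls this with
 * I915_SHRINK_PURGEABLE set, so that objects the user has already marked as
 * I915_MADV_DONTNEED are reclaimed before anything else is touched.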
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core has reused them before we could grab them.
 * Therefore code that needs to explicitly shrink buffer object caches (e.g. to
 * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker code
 * having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *i915,
                unsigned long target,
                unsigned long *nr_scanned,
                unsigned int flags)
{
        const struct {
                struct list_head *list;
                unsigned int bit;
        } phases[] = {
                { &i915->mm.unbound_list, I915_SHRINK_UNBOUND },
                { &i915->mm.bound_list, I915_SHRINK_BOUND },
                { NULL, 0 },
        }, *phase;
        intel_wakeref_t wakeref = 0;
        unsigned long count = 0;
        unsigned long scanned = 0;
        bool unlock;

        if (!shrinker_lock(i915, flags, &unlock))
                return 0;

        /*
         * When shrinking the active list, also consider active contexts.
         * Active contexts are pinned until they are retired, and so cannot
         * be simply unbound to retire and unpin their pages. To shrink
         * the contexts, we must wait until the GPU is idle.
         *
         * We don't care about errors here; if we cannot wait upon the GPU,
         * we will free as much as we can and hope to get a second chance.
         */
        if (flags & I915_SHRINK_ACTIVE)
                i915_gem_wait_for_idle(i915,
                                       I915_WAIT_LOCKED,
                                       MAX_SCHEDULE_TIMEOUT);

        trace_i915_gem_shrink(i915, target, flags);
        i915_retire_requests(i915);

        /*
         * Unbinding of objects will require HW access; let us not wake the
         * device just to recover a little memory. If absolutely necessary,
         * we will force the wake during oom-notifier.
         */
        if (flags & I915_SHRINK_BOUND) {
                wakeref = intel_runtime_pm_get_if_in_use(i915);
                if (!wakeref)
                        flags &= ~I915_SHRINK_BOUND;
        }

        /*
         * As we may completely rewrite the (un)bound list whilst unbinding
         * (due to retiring requests) we have to strictly process only
         * one element of the list at a time, and recheck the list
         * on every iteration.
         *
         * In particular, we must hold a reference whilst removing the
         * object as we may end up waiting for and/or retiring the objects.
         * This might release the final reference (held by the active list)
         * and result in the object being freed from under us. This is
         * similar to the precautions the eviction code must take whilst
         * removing objects.
         *
         * Also note that although these lists do not hold a reference to
         * the object we can safely grab one here: The final object
         * unreferencing and the bound_list are both protected by the
         * dev->struct_mutex and so we won't ever be able to observe an
         * object on the bound_list with a reference count equal to 0.
         */
        for (phase = phases; phase->list; phase++) {
                struct list_head still_in_list;
                struct drm_i915_gem_object *obj;

                if ((flags & phase->bit) == 0)
                        continue;

                INIT_LIST_HEAD(&still_in_list);

                /*
                 * We serialize our access to unreferenced objects through
                 * the use of the struct_mutex. While the objects are not
                 * yet freed (due to RCU then a workqueue) we still want
                 * to be able to shrink their pages, so they remain on
                 * the unbound/bound list until actually freed.
                 */
                spin_lock(&i915->mm.obj_lock);
                while (count < target &&
                       (obj = list_first_entry_or_null(phase->list,
                                                       typeof(*obj),
                                                       mm.link))) {
                        list_move_tail(&obj->mm.link, &still_in_list);

                        if (flags & I915_SHRINK_PURGEABLE &&
                            obj->mm.madv != I915_MADV_DONTNEED)
                                continue;

                        if (flags & I915_SHRINK_VMAPS &&
                            !is_vmalloc_addr(obj->mm.mapping))
                                continue;

                        if (!(flags & I915_SHRINK_ACTIVE) &&
                            (i915_gem_object_is_active(obj) ||
                             i915_gem_object_is_framebuffer(obj)))
                                continue;

                        if (!can_release_pages(obj))
                                continue;

                        spin_unlock(&i915->mm.obj_lock);

                        if (unsafe_drop_pages(obj)) {
                                /* May arrive from get_pages on another bo */
                                mutex_lock_nested(&obj->mm.lock,
                                                  I915_MM_SHRINKER);
                                if (!i915_gem_object_has_pages(obj)) {
                                        __i915_gem_object_invalidate(obj);
                                        count += obj->base.size >> PAGE_SHIFT;
                                }
                                mutex_unlock(&obj->mm.lock);
                        }
                        scanned += obj->base.size >> PAGE_SHIFT;

                        spin_lock(&i915->mm.obj_lock);
                }
                list_splice_tail(&still_in_list, phase->list);
                spin_unlock(&i915->mm.obj_lock);
        }

        if (flags & I915_SHRINK_BOUND)
                intel_runtime_pm_put(i915, wakeref);

        i915_retire_requests(i915);

        shrinker_unlock(i915, unlock);

        if (nr_scanned)
                *nr_scanned += scanned;
        return count;
}

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @i915: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code to intentionally quiesce the GPU or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
        intel_wakeref_t wakeref;
        unsigned long freed;

        wakeref = intel_runtime_pm_get(i915);
        freed = i915_gem_shrink(i915, -1UL, NULL,
                                I915_SHRINK_BOUND |
                                I915_SHRINK_UNBOUND |
                                I915_SHRINK_ACTIVE);
        intel_runtime_pm_put(i915, wakeref);

        return freed;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *i915 =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        struct drm_i915_gem_object *obj;
        unsigned long num_objects = 0;
        unsigned long count = 0;

        spin_lock(&i915->mm.obj_lock);
        list_for_each_entry(obj, &i915->mm.unbound_list, mm.link)
                if (can_release_pages(obj)) {
                        count += obj->base.size >> PAGE_SHIFT;
                        num_objects++;
                }

        list_for_each_entry(obj, &i915->mm.bound_list, mm.link)
                if (!i915_gem_object_is_active(obj) && can_release_pages(obj)) {
                        count += obj->base.size >> PAGE_SHIFT;
                        num_objects++;
                }
        spin_unlock(&i915->mm.obj_lock);

        /* Update our preferred vmscan batch size for the next pass.
         * Our rough guess for an effective batch size is roughly 2
         * available GEM objects worth of pages. That is, we don't want
         * the shrinker to fire until it is worth the cost of freeing an
         * entire GEM object.
         */
        if (num_objects) {
                unsigned long avg = 2 * count / num_objects;

                i915->mm.shrinker.batch =
                        max((i915->mm.shrinker.batch + avg) >> 1,
                            128ul /* default SHRINK_BATCH */);
        }
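
        /*
         * Illustrative (hypothetical) numbers: 128 shrinkable objects
         * covering 262144 pages (1GiB of 4KiB pages) give
         * avg = 2 * 262144 / 128 = 4096, so the batch settles around
         * 4096 pages and vmscan only invokes our scan callback in
         * roughly 16MiB steps.
         */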

        return count;
}

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *i915 =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        unsigned long freed;
        bool unlock;

        sc->nr_scanned = 0;

        if (!shrinker_lock(i915, 0, &unlock))
                return SHRINK_STOP;

        freed = i915_gem_shrink(i915,
                                sc->nr_to_scan,
                                &sc->nr_scanned,
                                I915_SHRINK_BOUND |
                                I915_SHRINK_UNBOUND |
                                I915_SHRINK_PURGEABLE);
        if (sc->nr_scanned < sc->nr_to_scan)
                freed += i915_gem_shrink(i915,
                                         sc->nr_to_scan - sc->nr_scanned,
                                         &sc->nr_scanned,
                                         I915_SHRINK_BOUND |
                                         I915_SHRINK_UNBOUND);
        if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
                intel_wakeref_t wakeref;

                wakeref = intel_runtime_pm_get(i915);
                freed += i915_gem_shrink(i915,
                                         sc->nr_to_scan - sc->nr_scanned,
                                         &sc->nr_scanned,
                                         I915_SHRINK_ACTIVE |
                                         I915_SHRINK_BOUND |
                                         I915_SHRINK_UNBOUND);
                intel_runtime_pm_put(i915, wakeref);
        }

        shrinker_unlock(i915, unlock);
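
        /*
         * If we managed to scan nothing at all (sc->nr_scanned == 0), report
         * SHRINK_STOP so the core vmscan code stops calling us for this
         * round; otherwise report the number of pages actually freed.
         */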
        return sc->nr_scanned ? freed : SHRINK_STOP;
}

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct drm_i915_private *i915 =
                container_of(nb, struct drm_i915_private, mm.oom_notifier);
        struct drm_i915_gem_object *obj;
        unsigned long unevictable, bound, unbound, freed_pages;
        intel_wakeref_t wakeref;

        wakeref = intel_runtime_pm_get(i915);
        freed_pages = i915_gem_shrink(i915, -1UL, NULL,
                                      I915_SHRINK_BOUND |
                                      I915_SHRINK_UNBOUND);
        intel_runtime_pm_put(i915, wakeref);

        /* Because we may be allocating inside our own driver, we cannot
         * assert that there are no objects with pinned pages that are not
         * being pointed to by hardware.
         */
        unbound = bound = unevictable = 0;
        spin_lock(&i915->mm.obj_lock);
        list_for_each_entry(obj, &i915->mm.unbound_list, mm.link) {
                if (!can_release_pages(obj))
                        unevictable += obj->base.size >> PAGE_SHIFT;
                else
                        unbound += obj->base.size >> PAGE_SHIFT;
        }
        list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
                if (!can_release_pages(obj))
                        unevictable += obj->base.size >> PAGE_SHIFT;
                else
                        bound += obj->base.size >> PAGE_SHIFT;
        }
        spin_unlock(&i915->mm.obj_lock);

        if (freed_pages || unbound || bound)
                pr_info("Purging GPU memory, %lu pages freed, "
                        "%lu pages still pinned.\n",
                        freed_pages, unevictable);

        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
}

static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct drm_i915_private *i915 =
                container_of(nb, struct drm_i915_private, mm.vmap_notifier);
        struct i915_vma *vma, *next;
        unsigned long freed_pages = 0;
        intel_wakeref_t wakeref;
        bool unlock;

        if (!shrinker_lock(i915, 0, &unlock))
                return NOTIFY_DONE;

        /* Force everything onto the inactive lists */
        if (i915_gem_wait_for_idle(i915,
                                   I915_WAIT_LOCKED,
                                   MAX_SCHEDULE_TIMEOUT))
                goto out;

        wakeref = intel_runtime_pm_get(i915);
        freed_pages += i915_gem_shrink(i915, -1UL, NULL,
                                       I915_SHRINK_BOUND |
                                       I915_SHRINK_UNBOUND |
                                       I915_SHRINK_VMAPS);
        intel_runtime_pm_put(i915, wakeref);

        /* We also want to clear any cached iomaps as they wrap vmap */
        list_for_each_entry_safe(vma, next,
                                 &i915->ggtt.vm.inactive_list, vm_link) {
                unsigned long count = vma->node.size >> PAGE_SHIFT;

                if (vma->iomap && i915_vma_unbind(vma) == 0)
                        freed_pages += count;
        }

out:
        shrinker_unlock(i915, unlock);

        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
}

/**
 * i915_gem_shrinker_register - Register the i915 shrinker
 * @i915: i915 device
 *
 * This function registers and sets up the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_register(struct drm_i915_private *i915)
{
        i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
        i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
        i915->mm.shrinker.seeks = DEFAULT_SEEKS;
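        /* Start with a batch of 4096 pages (16MiB with 4KiB pages);
         * i915_gem_shrinker_count() above re-tunes this value on every
         * count pass.
         */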
        i915->mm.shrinker.batch = 4096;
        WARN_ON(register_shrinker(&i915->mm.shrinker));

        i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
        WARN_ON(register_oom_notifier(&i915->mm.oom_notifier));

        i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
        WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}

/**
 * i915_gem_shrinker_unregister - Unregisters the i915 shrinker
 * @i915: i915 device
 *
 * This function unregisters the i915 shrinker and OOM handler.
 */
void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
{
        WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
        WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
        unregister_shrinker(&i915->mm.shrinker);
}

void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
                                    struct mutex *mutex)
{
        bool unlock = false;

        if (!IS_ENABLED(CONFIG_LOCKDEP))
                return;

        if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
                mutex_acquire(&i915->drm.struct_mutex.dep_map,
                              I915_MM_NORMAL, 0, _RET_IP_);
                unlock = true;
        }

        fs_reclaim_acquire(GFP_KERNEL);

        /*
         * As we invariably rely on the struct_mutex within the shrinker,
         * but have a complicated recursion dance, taint all the mutexes used
         * within the shrinker with the struct_mutex. For completeness, we
         * taint with all subclasses of struct_mutex, even though we should
         * only need tainting by I915_MM_NORMAL to catch possible ABBA
         * deadlocks from using struct_mutex inside @mutex.
         */
        mutex_acquire(&i915->drm.struct_mutex.dep_map,
                      I915_MM_SHRINKER, 0, _RET_IP_);
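
        /* The acquire/release pair below is a lockdep annotation only: it
         * records the struct_mutex -> @mutex dependency without ever taking
         * @mutex at run time.
         */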
        mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
        mutex_release(&mutex->dep_map, 0, _RET_IP_);

        mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);

        fs_reclaim_release(GFP_KERNEL);

        if (unlock)
                mutex_release(&i915->drm.struct_mutex.dep_map, 0, _RET_IP_);
}