/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
	bool fail_if_busy:1;
} igt_evict_ctl;)

static bool ggtt_is_idle(struct drm_i915_private *i915)
{
	return !i915->gt.active_requests;
}

static int ggtt_flush(struct drm_i915_private *i915)
{
	int err;

	/*
	 * Not everything in the GGTT is tracked via vma (otherwise we
	 * could evict as required with minimal stalling) so we are forced
	 * to idle the GPU and explicitly retire outstanding requests in
	 * the hopes that we can then remove contexts and the like only
	 * bound by their active reference.
	 */
	err = i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines);
	if (err)
		return err;

	err = i915_gem_wait_for_idle(i915,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED,
				     MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	GEM_BUG_ON(!ggtt_is_idle(i915));
	return 0;
}

static bool
mark_free(struct drm_mm_scan *scan,
	  struct i915_vma *vma,
	  unsigned int flags,
	  struct list_head *unwind)
{
	if (i915_vma_is_pinned(vma))
		return false;

	if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma))
		return false;

	list_add(&vma->evict_link, unwind);
	return drm_mm_scan_add_block(scan, &vma->node);
}

/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * Since this function is only used to free up virtual address space, it
 * ignores only pinned vmas, and not objects where the backing storage itself
 * is pinned. Hence obj->pages_pin_count does not protect against eviction.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct i915_address_space *vm,
			 u64 min_size, u64 alignment,
			 unsigned cache_level,
			 u64 start, u64 end,
			 unsigned flags)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct drm_mm_scan scan;
	struct list_head eviction_list;
	struct i915_vma *vma, *next;
	struct drm_mm_node *node;
	enum drm_mm_insert_mode mode;
	struct i915_vma *active;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict(vm, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in rough LRU order.
	 * Since both active and inactive objects reside on the same list,
	 * in a mix of creation and last scanned order, as we process the list
	 * we sort it into inactive/active, which keeps the active portion
	 * in a rough MRU order.
	 *
	 * The retirement sequence is thus:
	 *    1. Inactive objects (already retired, random order)
	 *    2. Active objects (will stall on unbinding, oldest scanned first)
	 */
	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;
	drm_mm_scan_init_with_range(&scan, &vm->mm,
				    min_size, alignment, cache_level,
				    start, end, mode);

	/*
	 * Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_retire_requests(dev_priv);

search_again:
	active = NULL;
	INIT_LIST_HEAD(&eviction_list);
	list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
		/*
		 * We keep this list in a rough least-recently scanned order
		 * of active elements (inactive elements are cheap to reap).
		 * New entries are added to the end, and we move anything we
		 * scan to the end. The assumption is that the working set
		 * of applications is either steady state (and thanks to the
		 * userspace bo cache it almost always is) or volatile and
		 * frequently replaced after a frame, which are self-evicting!
		 * Given that assumption, the MRU order of the scan list is
		 * fairly static, and keeping it in least-recently scan order
		 * is suitable.
		 *
		 * To notice when we complete one full cycle, we record the
		 * first active element seen, before moving it to the tail.
		 */
		if (i915_vma_is_active(vma)) {
			if (vma == active) {
				if (flags & PIN_NONBLOCK)
					break;

				active = ERR_PTR(-EAGAIN);
			}

			if (active != ERR_PTR(-EAGAIN)) {
				if (!active)
					active = vma;

				list_move_tail(&vma->vm_link, &vm->bound_list);
				continue;
			}
		}

		if (mark_free(&scan, vma, flags, &eviction_list))
			goto found;
	}

	/* Nothing found, clean up and bail out! */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		ret = drm_mm_scan_remove_block(&scan, &vma->node);
		BUG_ON(ret);
	}

	/*
	 * Can we unpin some objects such as idle hw contexts,
	 * or pending flips? But since only the GGTT has global entries
	 * such as scanouts, ringbuffers and contexts, we can skip the
	 * purge when inspecting per-process local address spaces.
	 */
	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
		return -ENOSPC;

	/*
	 * Not everything in the GGTT is tracked via VMA using
	 * i915_vma_move_to_active(), otherwise we could evict as required
	 * with minimal stalling. Instead we are forced to idle the GPU and
	 * explicitly retire outstanding requests which will then remove
	 * the pinning for active objects such as contexts and rings,
	 * enabling us to evict them on the next iteration.
	 *
	 * To ensure that all user contexts are evictable, we perform
	 * a switch to the perma-pinned kernel context. This also gives
	 * us a termination condition: when the last retired context is
	 * the kernel's, there is no more we can evict.
	 */
	if (!ggtt_is_idle(dev_priv)) {
		if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
			return -EBUSY;

		ret = ggtt_flush(dev_priv);
		if (ret)
			return ret;

		cond_resched();
		goto search_again;
	}

	/*
	 * If we still have pending pageflip completions, drop
	 * back to userspace to give our workqueues time to
	 * acquire our locks and unpin the old scanouts.
	 */
	return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to-be-evicted objects on a
	 * temporary list and take a reference for all before
	 * calling unbind (which may remove the active reference
	 * of any of our objects, thus corrupting the list).
	 */
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		if (drm_mm_scan_remove_block(&scan, &vma->node))
			__i915_vma_pin(vma);
		else
			list_del(&vma->evict_link);
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
		vma = container_of(node, struct i915_vma, node);
		ret = i915_vma_unbind(vma);
	}

	return ret;
}
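
/*
 * Illustrative sketch (not part of this file): a typical caller, such as
 * the GTT insertion path, is expected to try the allocation first and fall
 * back to i915_gem_evict_something() only on -ENOSPC, before retrying the
 * allocation. The helper below is hypothetical; only the functions it
 * calls are real.
 */
#if 0
static int example_insert_with_eviction(struct i915_address_space *vm,
					struct drm_mm_node *node,
					u64 size, u64 alignment,
					unsigned long color,
					u64 start, u64 end,
					unsigned int flags)
{
	int err;

	/* Check for a pre-existing hole before paying for an eviction. */
	err = drm_mm_insert_node_in_range(&vm->mm, node, size, alignment,
					  color, start, end,
					  DRM_MM_INSERT_BEST);
	if (err != -ENOSPC)
		return err;

	/* No hole found: evict in rough LRU order until one opens up. */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node, size, alignment,
					   color, start, end,
					   DRM_MM_INSERT_BEST);
}
#endif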

/**
 * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 * @vm: address space to evict from
 * @target: range (and color) to evict for
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas that overlap the target node.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_for_node(struct i915_address_space *vm,
			    struct drm_mm_node *target,
			    unsigned int flags)
{
	LIST_HEAD(eviction_list);
	struct drm_mm_node *node;
	u64 start = target->start;
	u64 end = start + target->size;
	struct i915_vma *vma, *next;
	bool check_color;
	int ret = 0;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	trace_i915_gem_evict_node(vm, target, flags);

	/* Retire before we search the active list. Although we have
	 * reasonable accuracy in our retirement lists, we may have
	 * a stray pin (preventing eviction) that can only be resolved by
	 * retiring.
	 */
	if (!(flags & PIN_NONBLOCK))
		i915_retire_requests(vm->i915);

	check_color = vm->mm.color_adjust;
	if (check_color) {
		/* Expand search to cover neighbouring guard pages (or lack!) */
		if (start)
			start -= I915_GTT_PAGE_SIZE;

		/* Always look at the page afterwards to avoid the end-of-GTT */
		end += I915_GTT_PAGE_SIZE;
	}
	GEM_BUG_ON(start >= end);

	drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
		/* If we find any non-objects (!vma), we cannot evict them */
		if (node->color == I915_COLOR_UNEVICTABLE) {
			ret = -ENOSPC;
			break;
		}

		GEM_BUG_ON(!node->allocated);
		vma = container_of(node, typeof(*vma), node);

		/* If we are using coloring to insert guard pages between
		 * different cache domains within the address space, we have
		 * to check whether the objects on either side of our range
		 * abut and conflict. If they are in conflict, then we evict
		 * those as well to make room for our guard pages.
		 */
		if (check_color) {
			if (node->start + node->size == target->start) {
				if (node->color == target->color)
					continue;
			}
			if (node->start == target->start + target->size) {
				if (node->color == target->color)
					continue;
			}
		}

		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
			ret = -ENOSPC;
			break;
		}

		if (flags & PIN_NONFAULT && i915_vma_has_userfault(vma)) {
			ret = -ENOSPC;
			break;
		}

		/* Overlap of objects in the same batch? */
		if (i915_vma_is_pinned(vma)) {
			ret = -ENOSPC;
			if (vma->exec_flags &&
			    *vma->exec_flags & EXEC_OBJECT_PINNED)
				ret = -EINVAL;
			break;
		}

		/* Never show fear in the face of dragons!
		 *
		 * We cannot directly remove this node from within this
		 * iterator and as with i915_gem_evict_something() we employ
		 * the vma pin_count in order to prevent the action of
		 * unbinding one vma from freeing (by dropping its active
		 * reference) another in our eviction list.
		 */
		__i915_vma_pin(vma);
		list_add(&vma->evict_link, &eviction_list);
	}

	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	return ret;
}
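
/*
 * Illustrative sketch (not part of this file): reserving a node at a fixed
 * offset, in the spirit of i915_gem_gtt_reserve(), would first try
 * drm_mm_reserve_node() and fall back to evicting whatever overlaps the
 * target range. The helper name is hypothetical; the calls are real.
 */
#if 0
static int example_reserve_with_eviction(struct i915_address_space *vm,
					 struct drm_mm_node *node,
					 u64 size, u64 offset,
					 unsigned long color,
					 unsigned int flags)
{
	int err;

	node->start = offset;
	node->size = size;
	node->color = color;

	/* Fast path: the target range may already be free. */
	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	/* Clear out the overlapping vmas (and any conflicting neighbours). */
	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}
#endif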

/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 * @vm: Address space to cleanse
 *
 * This function evicts all vmas from a vm.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm)
{
	struct list_head eviction_list;
	struct i915_vma *vma, *next;
	int ret;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	trace_i915_gem_evict_vm(vm);

	/* Switch back to the default context in order to unpin
	 * the existing context objects. However, such objects only
	 * pin themselves inside the global GTT and performing the
	 * switch otherwise is ineffective.
	 */
	if (i915_is_ggtt(vm)) {
		ret = ggtt_flush(vm->i915);
		if (ret)
			return ret;
	}

	INIT_LIST_HEAD(&eviction_list);
	mutex_lock(&vm->mutex);
	list_for_each_entry(vma, &vm->bound_list, vm_link) {
		if (i915_vma_is_pinned(vma))
			continue;

		__i915_vma_pin(vma);
		list_add(&vma->evict_link, &eviction_list);
	}
	mutex_unlock(&vm->mutex);

	ret = 0;
	list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
		__i915_vma_unpin(vma);
		if (ret == 0)
			ret = i915_vma_unbind(vma);
	}

	return ret;
}
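
/*
 * Illustrative sketch (not part of this file): as the kerneldoc above says,
 * execbuf uses i915_gem_evict_vm() as a last-ditch effort. A binding loop
 * might escalate from a plain pin attempt, to retiring requests, to a full
 * eviction of the vm. This outline is hypothetical, not the real
 * eb_reserve() implementation.
 */
#if 0
static int example_pin_with_escalation(struct i915_address_space *vm,
				       struct i915_vma *vma,
				       u64 size, u64 alignment, u64 flags)
{
	unsigned int pass = 0;
	int err;

	do {
		err = i915_vma_pin(vma, size, alignment, flags);
		if (err != -ENOSPC)
			return err;

		switch (pass++) {
		case 0:
			/* Cheap: retire completed requests to drop pins. */
			i915_retire_requests(vm->i915);
			break;

		case 1:
			/* Last ditch: evict everything idle from the vm. */
			err = i915_gem_evict_vm(vm);
			if (err)
				return err;
			break;

		default:
			return -ENOSPC;
		}
	} while (1);
}
#endif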

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_evict.c"
#endif