/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"
static struct i915_global_vma {
	struct i915_global base;
	struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
	return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
	return kmem_cache_free(global.slab_vmas, vma);
}
#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
	unsigned long *entries;
	unsigned int nr_entries;
	char buf[512];

	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);
		return;
	}

	nr_entries = stack_depot_fetch(vma->node.stack, &entries);
	stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif
static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
	return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

static void __i915_vma_retire(struct i915_active *ref)
{
	i915_vma_put(active_to_vma(ref));
}
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	kref_init(&vma->ref);
	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(vm);
	vma->ops = &vm->vma_ops;
	vma->obj = obj;
	vma->resv = obj->base.resv;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);
	}

	INIT_LIST_HEAD(&vma->closed_link);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     view->partial.size,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;
		}
	}

	if (unlikely(vma->size > vm->total))
		goto err_vma;

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	spin_lock(&obj->vma.lock);

	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))
			goto err_unlock;

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))
			goto err_unlock;

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
	}

	rb = NULL;
	p = &obj->vma.tree.rb_node;
	while (*p) {
		long cmp;

		rb = *p;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);
		if (cmp < 0)
			p = &rb->rb_right;
		else if (cmp > 0)
			p = &rb->rb_left;
		else
			goto err_unlock;
	}
	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
	else
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);

	return vma;

err_unlock:
	spin_unlock(&obj->vma.lock);
err_vma:
	i915_vm_put(vm);
	i915_vma_free(vma);
	return pos;
}
static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma.tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}
/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt(vm));
	GEM_BUG_ON(!atomic_read(&vm->open));

	spin_lock(&obj->vma.lock);
	vma = vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	if (unlikely(!vma))
		vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
	return vma;
}
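
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * a typical caller resolves the GGTT vma for an object before pinning it.
 * The "ggtt" pointer is assumed to come from the caller's context.
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * The returned vma is owned by the object: callers never free it directly,
 * as it persists until the object is freed or the address space is closed.
 */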
struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;
	struct i915_vma *vma;
	struct drm_i915_gem_object *pinned;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;
	unsigned int flags;
};
static int __vma_bind(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma *vma = vw->vma;

	vma->ops->bind_vma(vw->vm, &vw->stash,
			   vma, vw->cache_level, vw->flags);

	return 0;
}

static void __vma_release(struct dma_fence_work *work)
{
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	if (vw->pinned)
		__i915_gem_object_unpin_pages(vw->pinned);

	i915_vm_free_pt_stash(vw->vm, &vw->stash);
	i915_vm_put(vw->vm);
}

static const struct dma_fence_work_ops bind_ops = {
	.name = "bind",
	.work = __vma_bind,
	.release = __vma_release,
};
struct i915_vma_work *i915_vma_work(void)
{
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);
	if (!vw)
		return NULL;

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */

	return vw;
}
int i915_vma_wait_for_bind(struct i915_vma *vma)
{
	int err = 0;

	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
			dma_fence_put(fence);
		}
	}

	return err;
}
/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  u32 flags,
		  struct i915_vma_work *work)
{
	u32 bind_flags;
	u32 vma_flags;

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
					      vma->node.size,
					      vma->vm->total)))
		return -ENODEV;

	if (GEM_DEBUG_WARN_ON(!flags))
		return -EINVAL;

	bind_flags = flags;
	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0)
		return 0;

	GEM_BUG_ON(!vma->pages);

	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma = vma;
		work->cache_level = cache_level;
		work->flags = bind_flags;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
		if (prev) {
			__i915_sw_fence_await_dma_fence(&work->base.chain,
							prev,
							&work->cb);
			dma_fence_put(prev);
		}

		work->base.dma.error = 0; /* enable the queue_work() */

		if (vma->obj) {
			__i915_gem_object_pin_pages(vma->obj);
			work->pinned = vma->obj;
		}
	} else {
		vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
	}

	atomic_or(bind_flags, &vma->flags);
	return 0;
}
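
/*
 * Editor's note (illustrative, not part of the original file): callers do
 * not usually invoke i915_vma_bind() directly; i915_vma_pin_ww() below
 * preallocates the worker before taking vm->mutex and then commits it:
 *
 *	work = i915_vma_work();
 *	...
 *	err = i915_vma_bind(vma, cache_level, flags, work);
 *	...
 *	dma_fence_work_commit_imm(&work->base);
 *
 * When @work is NULL, or none of @flags intersects vm->bind_async_flags,
 * the PTEs are instead written synchronously under vm->mutex.
 */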
void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;
	int err;

	if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
		err = -ENODEV;
		goto err;
	}

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));

	ptr = READ_ONCE(vma->iomap);
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err;
		}

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);
			ptr = vma->iomap;
		}
	}

	__i915_vma_pin(vma);

	err = i915_vma_pin_fence(vma);
	if (err)
		goto err_unpin;

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */
	return ptr;

err_unpin:
	__i915_vma_unpin(vma);
err:
	return IO_ERR_PTR(err);
}
void i915_vma_flush_writes(struct i915_vma *vma)
{
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);
	i915_vma_unpin(vma);
}
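
/*
 * Illustrative usage (editor's sketch, not part of the original file):
 * writing through the mappable aperture. The source of "vma", "value" and
 * "offset" is assumed.
 *
 *	void __iomem *ptr;
 *
 *	ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *
 *	writel(value, ptr + offset);
 *	i915_vma_unpin_iomap(vma);
 *
 * Note that i915_vma_pin_iomap() caches the mapping in vma->iomap, so
 * repeated pin/unpin cycles do not remap the aperture each time.
 */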
void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);
	if (!vma)
		return;

	obj = vma->obj;
	GEM_BUG_ON(!obj);

	i915_vma_unpin(vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);
}
bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
{
	if (!drm_mm_node_allocated(&vma->node))
		return false;

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
		return true;

	if (vma->node.size < size)
		return true;

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))
		return true;

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

	return false;
}
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	else
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))
		return true;

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))
		return false;

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))
		return false;

	return true;
}
/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
	unsigned long color;
	u64 start, end;
	int ret;

	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);
	}

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
	if (size > end) {
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",
			  end);
		return -ENOSPC;
	}

	color = 0;
	if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;
		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))
			return -EINVAL;

		ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
					   size, offset, color,
					   flags);
		if (ret)
			return ret;
	} else {
		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 * objects to 2M.
			 */
			u64 page_alignment =
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);
		}

		ret = i915_gem_gtt_insert(vma->vm, &vma->node,
					  size, alignment, color,
					  start, end, flags);
		if (ret)
			return ret;

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
	}
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_add_tail(&vma->vm_link, &vma->vm->bound_list);

	return 0;
}
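
/*
 * Worked example (editor's illustration, not part of the original file):
 * for an object backed by 64K pages being inserted into a 48b ppGTT
 * (end > 4GiB), the code above raises the alignment to
 * rounddown_pow_of_two(64K | 2M) == 2M and rounds the size up to a 2M
 * multiple, guaranteeing that 64K and 4K PTEs never share one 2M
 * page-table block.
 */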
static void
i915_vma_detach(struct i915_vma *vma)
{
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_del(&vma->vm_link);
}
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
	unsigned int bound;
	bool pinned = true;

	bound = atomic_read(&vma->flags);
	do {
		if (unlikely(flags & ~bound))
			return false;

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
			return false;

		if (!(bound & I915_VMA_PIN_MASK))
			goto unpinned;

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

	return true;

unpinned:
	/*
	 * If pin_count==0, but we are bound, check under the lock to avoid
	 * racing with a concurrent i915_vma_unbind().
	 */
	mutex_lock(&vma->vm->mutex);
	do {
		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
			pinned = false;
			break;
		}

		if (unlikely(flags & ~bound)) {
			pinned = false;
			break;
		}
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
	mutex_unlock(&vma->vm->mutex);

	return pinned;
}
static int vma_get_pages(struct i915_vma *vma)
{
	int err = 0;

	if (atomic_add_unless(&vma->pages_count, 1, 0))
		return 0;

	/* Allocations ahoy! */
	if (mutex_lock_interruptible(&vma->pages_mutex))
		return -EINTR;

	if (!atomic_read(&vma->pages_count)) {
		if (vma->obj) {
			err = i915_gem_object_pin_pages(vma->obj);
			if (err)
				goto unlock;
		}

		err = vma->ops->set_pages(vma);
		if (err) {
			if (vma->obj)
				i915_gem_object_unpin_pages(vma->obj);
			goto unlock;
		}
	}
	atomic_inc(&vma->pages_count);

unlock:
	mutex_unlock(&vma->pages_mutex);

	return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
	/* We allocate under vma_get_pages, so beware the shrinker */
	mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		vma->ops->clear_pages(vma);
		GEM_BUG_ON(vma->pages);
		if (vma->obj)
			i915_gem_object_unpin_pages(vma->obj);
	}
	mutex_unlock(&vma->pages_mutex);
}

static void vma_put_pages(struct i915_vma *vma)
{
	if (atomic_add_unless(&vma->pages_count, -1, 1))
		return;

	__vma_put_pages(vma, 1);
}
static void vma_unbind_pages(struct i915_vma *vma)
{
	unsigned int count;

	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;
	GEM_BUG_ON(!count);

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}
int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		    u64 size, u64 alignment, u64 flags)
{
	struct i915_vma_work *work = NULL;
	intel_wakeref_t wakeref = 0;
	unsigned int bound;
	int err;

#ifdef CONFIG_PROVE_LOCKING
	if (debug_locks && lockdep_is_held(&vma->vm->i915->drm.struct_mutex))
		WARN_ON(!ww);
#endif

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
		return 0;

	err = vma_get_pages(vma);
	if (err)
		return err;

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	if (flags & vma->vm->bind_async_flags) {
		work = i915_vma_work();
		if (!work) {
			err = -ENOMEM;
			goto err_rpm;
		}

		work->vm = i915_vm_get(vma->vm);

		/* Allocate enough page directories to cover the used PTEs */
		if (vma->vm->allocate_va_range) {
			err = i915_vm_alloc_pt_stash(vma->vm,
						     &work->stash,
						     vma->size);
			if (err)
				goto err_fence;

			err = i915_vm_pin_pt_stash(vma->vm,
						   &work->stash);
			if (err)
				goto err_fence;
		}
	}

	/*
	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
	 *
	 * We conflate the Global GTT with the user's vma when using the
	 * aliasing-ppgtt, but it is still vitally important to try and
	 * keep the use cases distinct. For example, userptr objects are
	 * not allowed inside the Global GTT as that will cause lock
	 * inversions when we have to evict them from the mmu_notifier
	 * callbacks - but they are allowed to be part of the user ppGTT
	 * which can never be mapped. As such we try to give the distinct
	 * users of the same mutex, distinct lockclasses [equivalent to how
	 * we keep i915_ggtt and i915_ppgtt separate].
	 *
	 * NB this may cause us to mask real lock inversions -- while the
	 * code is safe today, lockdep may not be able to spot future
	 * transgressions.
	 */
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	if (err)
		goto err_fence;

	/* No more allocations allowed now we hold vm->mutex */

	if (unlikely(i915_vma_is_closed(vma))) {
		err = -ENOENT;
		goto err_unlock;
	}

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {
		err = -ENOMEM;
		goto err_unlock;
	}

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */
		goto err_unlock;
	}

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		__i915_vma_pin(vma);
		goto err_unlock;
	}

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unlock;

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, size, alignment, flags);
		if (err)
			goto err_active;

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);
	}

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj ? vma->obj->cache_level : 0,
			    flags, work);
	if (err)
		goto err_remove;

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	__i915_vma_pin(vma);
	GEM_BUG_ON(!i915_vma_is_pinned(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);
	}
err_active:
	i915_active_release(&vma->active);
err_unlock:
	mutex_unlock(&vma->vm->mutex);
err_fence:
	if (work)
		dma_fence_work_commit_imm(&work->base);
err_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
	vma_put_pages(vma);

	return err;
}
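
/*
 * Illustrative caller sketch (editor's addition, not from the original
 * file): pinning under a ww acquire context with the usual backoff loop;
 * the object lock must be taken through the same context.
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err)
 *		err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */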
static void flush_idle_contexts(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}
int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags)
{
	struct i915_address_space *vm = vma->vm;
	int err;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	do {
		err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
		if (err != -ENOSPC) {
			if (!err) {
				err = i915_vma_wait_for_bind(vma);
				if (err)
					i915_vma_unpin(vma);
			}
			return err;
		}

		/* Unlike i915_vma_pin, we don't take no for an answer! */
		flush_idle_contexts(vm->gt);
		if (mutex_lock_interruptible(&vm->mutex) == 0) {
			i915_gem_evict_vm(vm);
			mutex_unlock(&vm->mutex);
		}
	} while (1);
}
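
/*
 * Illustrative usage (editor's sketch, not from the original file):
 * pinning a kernel-internal object, e.g. a ringbuffer, into the GGTT;
 * the PIN_HIGH placement and NULL ww context are the caller's choice.
 *
 *	err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
 *	if (err)
 *		return err;
 */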
static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	GEM_BUG_ON(i915_vma_is_closed(vma));
	list_add(&vma->closed_link, &gt->closed_vma);
}

void i915_vma_close(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	if (i915_vma_is_ggtt(vma))
		return;

	GEM_BUG_ON(!atomic_read(&vma->open_count));
	if (atomic_dec_and_lock_irqsave(&vma->open_count,
					&gt->closed_lock,
					flags)) {
		__vma_close(vma, gt);
		spin_unlock_irqrestore(&gt->closed_lock, flags);
	}
}
static void __i915_vma_remove_closed(struct i915_vma *vma)
{
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	list_del_init(&vma->closed_link);
	spin_unlock_irq(&gt->closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
}
void i915_vma_release(struct kref *ref)
{
	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);

	if (drm_mm_node_allocated(&vma->node)) {
		mutex_lock(&vma->vm->mutex);
		atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
		WARN_ON(__i915_vma_unbind(vma));
		mutex_unlock(&vma->vm->mutex);
		GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
	}
	GEM_BUG_ON(i915_vma_is_active(vma));

	if (vma->obj) {
		struct drm_i915_gem_object *obj = vma->obj;

		spin_lock(&obj->vma.lock);
		list_del(&vma->obj_link);
		if (!RB_EMPTY_NODE(&vma->obj_node))
			rb_erase(&vma->obj_node, &obj->vma.tree);
		spin_unlock(&obj->vma.lock);
	}

	__i915_vma_remove_closed(vma);
	i915_vm_put(vma->vm);

	i915_active_fini(&vma->active);
	i915_vma_free(vma);
}
void i915_vma_parked(struct intel_gt *gt)
{
	struct i915_vma *vma, *next;
	LIST_HEAD(closed);

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		if (!i915_vm_tryopen(vm)) {
			i915_gem_object_put(obj);
			continue;
		}

		list_move(&vma->closed_link, &closed);
	}
	spin_unlock_irq(&gt->closed_lock);

	/* As the GT is held idle, no vma can be reopened as we destroy them */
	list_for_each_entry_safe(vma, next, &closed, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		INIT_LIST_HEAD(&vma->closed_link);
		__i915_vma_put(vma);

		i915_gem_object_put(obj);
		i915_vm_close(vm);
	}
}
static void __i915_vma_iounmap(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}
void i915_vma_revoke_mmap(struct i915_vma *vma)
{
	struct drm_vma_offset_node *node;
	u64 vma_offset;

	if (!i915_vma_has_userfault(vma))
		return;

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	node = &vma->mmo->vma_node;
	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,
			    vma->size,
			    1);

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);
}
static int
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
{
	return __i915_request_await_exclusive(rq, &vma->active);
}

int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
	int err;

	GEM_BUG_ON(!i915_vma_is_pinned(vma));

	/* Wait for the vma to be bound before we start! */
	err = __i915_request_await_bind(rq, vma);
	if (err)
		return err;

	return i915_active_add_request(&vma->active, rq);
}
int i915_vma_move_to_active(struct i915_vma *vma,
			    struct i915_request *rq,
			    unsigned int flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	int err;

	assert_object_held(obj);

	err = __i915_vma_move_to_active(vma, rq);
	if (unlikely(err))
		return err;

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = __intel_frontbuffer_get(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}

		dma_resv_add_excl_fence(vma->resv, &rq->fence);
		obj->write_domain = I915_GEM_DOMAIN_RENDER;
		obj->read_domains = 0;
	} else {
		err = dma_resv_reserve_shared(vma->resv, 1);
		if (unlikely(err))
			return err;

		dma_resv_add_shared_fence(vma->resv, &rq->fence);
		obj->write_domain = 0;
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
		i915_active_add_request(&vma->fence->active, rq);

	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}
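
/*
 * Illustrative usage (editor's sketch, not from the original file): the
 * usual pattern when building a request against a pinned vma, with the
 * object lock held; "rq" is assumed to come from i915_request_create().
 *
 *	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 *	if (err)
 *		return err;
 *
 * The request now keeps the vma busy, and the vma cannot be unbound until
 * the request is retired.
 */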
void __i915_vma_evict(struct i915_vma *vma)
{
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes, they may end up referencing the
		 * GGTT PTE after the unbind.
		 *
		 * Note that we may be concurrently poking at the GGTT_WRITE
		 * bit from set-domain, as we mark all GGTT vma associated
		 * with an object. We know this is for another vma, as we
		 * are currently unbinding this one -- so if this vma will be
		 * reused, it will be refaulted and have its dirty bit set
		 * before the next write.
		 */
		i915_vma_flush_writes(vma);

		/* release the fence reg _after_ flushing */
		i915_vma_revoke_fence(vma);

		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}
	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	if (likely(atomic_read(&vma->vm->open))) {
		trace_i915_vma_unbind(vma);
		vma->ops->unbind_vma(vma->vm, vma);
	}
	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
		   &vma->flags);

	i915_vma_detach(vma);
	vma_unbind_pages(vma);
}
int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	__i915_vma_evict(vma);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
	return 0;
}
int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	int err;

	/* Optimistic wait before taking the mutex */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		/* XXX not always required: nop_clear_range */
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
	if (err)
		goto out_rpm;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif
static void i915_global_vma_shrink(void)
{
	kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
	kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
	.shrink = i915_global_vma_shrink,
	.exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
	global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!global.slab_vmas)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}