/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_tiling.h"

#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_gem_evict.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma_resource.h"

static inline void assert_vma_held_evict(const struct i915_vma *vma)
	/*
	 * We may be forced to unbind when the vm is dead, to clean it up.
	 * This is the only exception to the requirement of the object lock
	 */
	if (kref_read(&vma->vm->ref))
		assert_object_held_shared(vma->obj);

static struct kmem_cache *slab_vmas;

static struct i915_vma *i915_vma_alloc(void)
	return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);

static void i915_vma_free(struct i915_vma *vma)
	return kmem_cache_free(slab_vmas, vma);
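
/*
 * Both allocation helpers above draw from the dedicated "i915_vma" slab
 * (slab_vmas), created in i915_vma_module_init() and destroyed in
 * i915_vma_module_exit() at the end of this file.
 */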

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
	if (!vma->node.stack) {
		DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
				 vma->node.start, vma->node.size, reason);

	stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
	DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
			 vma->node.start, vma->node.size, reason, buf);

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
	return container_of(ref, typeof(struct i915_vma), active);

static int __i915_vma_active(struct i915_active *ref)
	return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;

static void __i915_vma_retire(struct i915_active *ref)
	i915_vma_put(active_to_vma(ref));
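
/*
 * The active-tracking callbacks above tie the vma's lifetime to GPU
 * activity: __i915_vma_active() takes a reference on the vma when the
 * first request is added, and __i915_vma_retire() drops it once the
 * last request using the vma has been retired.
 */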

static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
	   struct i915_address_space *vm,
	   const struct i915_ggtt_view *view)
	struct i915_vma *pos = ERR_PTR(-E2BIG);
	struct i915_vma *vma;
	struct rb_node *rb, **p;

	/* The aliasing_ppgtt should never be used directly! */
	GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

	vma = i915_vma_alloc();
		return ERR_PTR(-ENOMEM);

	vma->ops = &vm->vma_ops;
	vma->size = obj->base.size;
	vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);

	/* Declare ourselves safe for use inside shrinkers */
	if (IS_ENABLED(CONFIG_LOCKDEP)) {
		fs_reclaim_acquire(GFP_KERNEL);
		might_lock(&vma->active.mutex);
		fs_reclaim_release(GFP_KERNEL);

	INIT_LIST_HEAD(&vma->closed_link);
	INIT_LIST_HEAD(&vma->obj_link);
	RB_CLEAR_NODE(&vma->obj_node);

	if (view && view->type != I915_GGTT_VIEW_NORMAL) {
		vma->ggtt_view = *view;
		if (view->type == I915_GGTT_VIEW_PARTIAL) {
			GEM_BUG_ON(range_overflows_t(u64,
						     view->partial.offset,
						     obj->base.size >> PAGE_SHIFT));
			vma->size = view->partial.size;
			vma->size <<= PAGE_SHIFT;
			GEM_BUG_ON(vma->size > obj->base.size);
		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
			vma->size = intel_rotation_info_size(&view->rotated);
			vma->size <<= PAGE_SHIFT;
		} else if (view->type == I915_GGTT_VIEW_REMAPPED) {
			vma->size = intel_remapped_info_size(&view->remapped);
			vma->size <<= PAGE_SHIFT;

	if (unlikely(vma->size > vm->total))

	GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

	err = mutex_lock_interruptible(&vm->mutex);

	list_add_tail(&vma->vm_link, &vm->unbound_list);

	spin_lock(&obj->vma.lock);
	if (i915_is_ggtt(vm)) {
		if (unlikely(overflows_type(vma->size, u32)))

		vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
						      i915_gem_object_get_tiling(obj),
						      i915_gem_object_get_stride(obj));
		if (unlikely(vma->fence_size < vma->size || /* overflow */
			     vma->fence_size > vm->total))

		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

		vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
								i915_gem_object_get_tiling(obj),
								i915_gem_object_get_stride(obj));
		GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

		__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));

	p = &obj->vma.tree.rb_node;
		pos = rb_entry(rb, struct i915_vma, obj_node);

		/*
		 * If the view already exists in the tree, another thread
		 * already created a matching vma, so return the older instance
		 * and dispose of ours.
		 */
		cmp = i915_vma_compare(pos, vm, view);

	rb_link_node(&vma->obj_node, rb, p);
	rb_insert_color(&vma->obj_node, &obj->vma.tree);

	if (i915_vma_is_ggtt(vma))
		/*
		 * We put the GGTT vma at the start of the vma-list, followed
		 * by the ppGGTT vma. This allows us to break early when
		 * iterating over only the GGTT vma for an object, see
		 * for_each_ggtt_vma()
		 */
		list_add(&vma->obj_link, &obj->vma.list);
		list_add_tail(&vma->obj_link, &obj->vma.list);

	spin_unlock(&obj->vma.lock);
	mutex_unlock(&vm->mutex);

	spin_unlock(&obj->vma.lock);
	list_del_init(&vma->vm_link);
	mutex_unlock(&vm->mutex);

static struct i915_vma *
i915_vma_lookup(struct drm_i915_gem_object *obj,
		struct i915_address_space *vm,
		const struct i915_ggtt_view *view)
	rb = obj->vma.tree.rb_node;
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);

		cmp = i915_vma_compare(vma, vm, view);

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
		  struct i915_address_space *vm,
		  const struct i915_ggtt_view *view)
	struct i915_vma *vma;

	GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
	GEM_BUG_ON(!kref_read(&vm->ref));

	spin_lock(&obj->vma.lock);
	vma = i915_vma_lookup(obj, vm, view);
	spin_unlock(&obj->vma.lock);

	/* vma_create() will resolve the race if another creates the vma */
	vma = vma_create(obj, vm, view);

	GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
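
/*
 * Illustrative use of i915_vma_instance(); a sketch only, where the
 * names obj and ggtt are placeholders, not taken from this file:
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */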

struct i915_vma_work {
	struct dma_fence_work base;
	struct i915_address_space *vm;
	struct i915_vm_pt_stash stash;
	struct i915_vma_resource *vma_res;
	struct drm_i915_gem_object *pinned;
	struct i915_sw_dma_fence_cb cb;
	enum i915_cache_level cache_level;

static void __vma_bind(struct dma_fence_work *work)
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
	struct i915_vma_resource *vma_res = vw->vma_res;

	vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
			       vma_res, vw->cache_level, vw->flags);

static void __vma_release(struct dma_fence_work *work)
	struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

	i915_gem_object_put(vw->pinned);

	i915_vm_free_pt_stash(vw->vm, &vw->stash);

	i915_vma_resource_put(vw->vma_res);

static const struct dma_fence_work_ops bind_ops = {
	.release = __vma_release,

struct i915_vma_work *i915_vma_work(void)
	struct i915_vma_work *vw;

	vw = kzalloc(sizeof(*vw), GFP_KERNEL);

	dma_fence_work_init(&vw->base, &bind_ops);
	vw->base.dma.error = -EAGAIN; /* disable the worker by default */
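
/*
 * The work is created with a sticky -EAGAIN error so that committing it
 * is a no-op unless i915_vma_bind() later clears the error (see the
 * "enable the queue_work()" line below) once the bind is fully set up.
 */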

int i915_vma_wait_for_bind(struct i915_vma *vma)
	if (rcu_access_pointer(vma->active.excl.fence)) {
		struct dma_fence *fence;

		fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
			err = dma_fence_wait(fence, true);
			dma_fence_put(fence);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
static int i915_vma_verify_bind_complete(struct i915_vma *vma)
	struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);

	if (dma_fence_is_signaled(fence))

	dma_fence_put(fence);
#else
#define i915_vma_verify_bind_complete(_vma) 0
#endif

I915_SELFTEST_EXPORT void
i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
				struct i915_vma *vma)
	struct drm_i915_gem_object *obj = vma->obj;

	i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
			       obj->mm.rsgt, i915_gem_object_is_readonly(obj),
			       i915_gem_object_is_lmem(obj), obj->mm.region,
			       vma->ops, vma->private, vma->node.start,
			       vma->node.size, vma->size);

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 * @vma_res: pointer to a preallocated vma resource. The resource is either
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
		  enum i915_cache_level cache_level,
		  struct i915_vma_work *work,
		  struct i915_vma_resource *vma_res)
	lockdep_assert_held(&vma->vm->mutex);
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(vma->size > vma->node.size);

	if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
		i915_vma_resource_free(vma_res);

	if (GEM_DEBUG_WARN_ON(!flags)) {
		i915_vma_resource_free(vma_res);

	bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	vma_flags = atomic_read(&vma->flags);
	vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	bind_flags &= ~vma_flags;
	if (bind_flags == 0) {
		i915_vma_resource_free(vma_res);

	GEM_BUG_ON(!atomic_read(&vma->pages_count));

	/* Wait for or await async unbinds touching our range */
	if (work && bind_flags & vma->vm->bind_async_flags)
		ret = i915_vma_resource_bind_dep_await(vma->vm,
						       __GFP_RETRY_MAYFAIL |
		ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
						      vma->node.size, true);
		i915_vma_resource_free(vma_res);

	if (vma->resource || !vma_res) {
		/* Rebinding with an additional I915_VMA_*_BIND */
		GEM_WARN_ON(!vma_flags);
		i915_vma_resource_free(vma_res);
	} else {
		i915_vma_resource_init_from_vma(vma_res, vma);
		vma->resource = vma_res;

	trace_i915_vma_bind(vma, bind_flags);
	if (work && bind_flags & vma->vm->bind_async_flags) {
		struct dma_fence *prev;

		work->vma_res = i915_vma_resource_get(vma->resource);
		work->cache_level = cache_level;
		work->flags = bind_flags;

		/*
		 * Note we only want to chain up to the migration fence on
		 * the pages (not the object itself). As we don't track that,
		 * yet, we have to use the exclusive fence instead.
		 *
		 * Also note that we do not want to track the async vma as
		 * part of the obj->resv->excl_fence as it only affects
		 * execution and not content or object's backing store lifetime.
		 */
		prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
			__i915_sw_fence_await_dma_fence(&work->base.chain,

		work->base.dma.error = 0; /* enable the queue_work() */

		/*
		 * If we don't have the refcounted pages list, keep a reference
		 * on the object to avoid waiting for the async bind to
		 * complete in the object destruction path.
		 */
		if (!work->vma_res->bi.pages_rsgt)
			work->pinned = i915_gem_object_get(vma->obj);
	} else {
		ret = i915_gem_object_wait_moving_fence(vma->obj, true);
			i915_vma_resource_free(vma->resource);
			vma->resource = NULL;

		vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,

	set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);

	atomic_or(bind_flags, &vma->flags);

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
	if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
		return IOMEM_ERR_PTR(-EINVAL);

	if (!i915_gem_object_is_lmem(vma->obj)) {
		if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
	GEM_BUG_ON(i915_vma_verify_bind_complete(vma));

	ptr = READ_ONCE(vma->iomap);
		/*
		 * TODO: consider just using i915_gem_object_pin_map() for lmem
		 * instead, which already supports mapping non-contiguous chunks
		 * of pages, that way we can also drop the
		 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
		 */
		if (i915_gem_object_is_lmem(vma->obj))
			ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
							  vma->obj->base.size);
			ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,

		if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
			io_mapping_unmap(ptr);

	err = i915_vma_pin_fence(vma);

	i915_vma_set_ggtt_write(vma);

	/* NB Access through the GTT requires the device to be awake. */

	__i915_vma_unpin(vma);

	return IOMEM_ERR_PTR(err);

void i915_vma_flush_writes(struct i915_vma *vma)
	if (i915_vma_unset_ggtt_write(vma))
		intel_gt_flush_ggtt_writes(vma->vm->gt);
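
/*
 * The GGTT_WRITE flag consumed above is set by i915_vma_pin_iomap() via
 * i915_vma_set_ggtt_write(); flushing here makes writes that went
 * through the aperture globally visible before fence release or unbind
 * (see __i915_vma_evict()).
 */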

void i915_vma_unpin_iomap(struct i915_vma *vma)
	GEM_BUG_ON(vma->iomap == NULL);

	i915_vma_flush_writes(vma);

	i915_vma_unpin_fence(vma);

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
	struct i915_vma *vma;
	struct drm_i915_gem_object *obj;

	vma = fetch_and_zero(p_vma);

	if (flags & I915_VMA_RELEASE_MAP)
		i915_gem_object_unpin_map(obj);

	i915_gem_object_put(obj);

bool i915_vma_misplaced(const struct i915_vma *vma,
			u64 size, u64 alignment, u64 flags)
	if (!drm_mm_node_allocated(&vma->node))

	if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))

	if (vma->node.size < size)

	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	if (alignment && !IS_ALIGNED(vma->node.start, alignment))

	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))

	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
	bool mappable, fenceable;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
	GEM_BUG_ON(!vma->fence_size);

	fenceable = (vma->node.size >= vma->fence_size &&
		     IS_ALIGNED(vma->node.start, vma->fence_alignment));

	mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

	if (mappable && fenceable)
		set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
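
/*
 * In short: "fenceable" means the node is large enough and sufficiently
 * aligned for a fence register, while "mappable" means it lies wholly
 * within the CPU-visible portion of the GGTT aperture.
 */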

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
	struct drm_mm_node *node = &vma->node;
	struct drm_mm_node *other;

	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 */
	if (!i915_vm_has_cache_coloring(vma->vm))

	/* Only valid to be called on an already inserted vma */
	GEM_BUG_ON(!drm_mm_node_allocated(node));
	GEM_BUG_ON(list_empty(&node->node_list));

	other = list_prev_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(other))

	other = list_next_entry(node, node_list);
	if (i915_node_color_differs(other, color) &&
	    !drm_mm_hole_follows(node))

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry to make room for the new VMA.
 *
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		u64 size, u64 alignment, u64 flags)
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

	size = max(size, vma->size);
	alignment = max(alignment, vma->display_alignment);
	if (flags & PIN_MAPPABLE) {
		size = max_t(typeof(size), size, vma->fence_size);
		alignment = max_t(typeof(alignment),
				  alignment, vma->fence_alignment);

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(!is_power_of_2(alignment));

	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
	GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

	end = vma->vm->total;
	if (flags & PIN_MAPPABLE)
		end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
	if (flags & PIN_ZONE_4G)
		end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
	GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));
	/*
	 * for compact-pt we round up the reservation to prevent
	 * any smaller pages being used within the same PDE
	 */
	if (NEEDS_COMPACT_PT(vma->vm->i915))
		size = round_up(size, alignment);

	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
	 */
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
			  size, flags & PIN_MAPPABLE ? "mappable" : "total",

	if (i915_vm_has_cache_coloring(vma->vm))
		color = vma->obj->cache_level;

	if (flags & PIN_OFFSET_FIXED) {
		u64 offset = flags & PIN_OFFSET_MASK;

		if (!IS_ALIGNED(offset, alignment) ||
		    range_overflows(offset, size, end))

		ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,

		/*
		 * We only support huge gtt pages through the 48b PPGTT,
		 * however we also don't want to force any alignment for
		 * objects which need to be tightly packed into the low 32bits.
		 *
		 * Note that we assume that GGTT are limited to 4GiB for the
		 * foreseeable future. See also i915_ggtt_offset().
		 */
		if (upper_32_bits(end - 1) &&
		    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
			/*
			 * We can't mix 64K and 4K PTEs in the same page-table
			 * (2M block), and so to avoid the ugliness and
			 * complexity of coloring we opt for just aligning 64K
			 */
				rounddown_pow_of_two(vma->page_sizes.sg |
						     I915_GTT_PAGE_SIZE_2M);

			/*
			 * Check we don't expand for the limited Global GTT
			 * (mappable aperture is even more precious!). This
			 * also checks that we exclude the aliasing-ppgtt.
			 */
			GEM_BUG_ON(i915_vma_is_ggtt(vma));

			alignment = max(alignment, page_alignment);

			if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
				size = round_up(size, I915_GTT_PAGE_SIZE_2M);

		ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
					  size, alignment, color,

	GEM_BUG_ON(vma->node.start < start);
	GEM_BUG_ON(vma->node.start + vma->node.size > end);

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

	list_move_tail(&vma->vm_link, &vma->vm->bound_list);
void
i915_vma_detach(struct i915_vma *vma)
	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
	GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

	/*
	 * And finally now the object is completely decoupled from this
	 * vma, we can drop its hold on the backing storage and allow
	 * it to be reaped by the shrinker.
	 */
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
	bound = atomic_read(&vma->flags);

	if (flags & PIN_VALIDATE) {
		flags &= I915_VMA_BIND_MASK;

		return (flags & bound) == flags;

	/* with the lock mandatory for unbind, we don't race here */
	flags &= I915_VMA_BIND_MASK;
	do {
		if (unlikely(flags & ~bound))

		if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))

		GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
	} while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
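
/*
 * The cmpxchg loop above is a lock-free fast path: the pin count is only
 * bumped if the vma is already bound with (at least) the requested flags
 * and has neither overflowed its pin count nor been marked as errored.
 */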

static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int src_stride, unsigned int dst_stride,
	     struct sg_table *st, struct scatterlist *sg)
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		src_idx = src_stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			src_idx -= src_stride;

		left = (dst_stride - height) * I915_GTT_PAGE_SIZE;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;

static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
	unsigned int size = intel_rotation_info_size(rot_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct scatterlist *sg;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);

	ret = sg_alloc_table(st, size, GFP_KERNEL);

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		sg = rotate_pages(obj, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].src_stride,
				  rot_info->plane[i].dst_stride,

	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rot_info->plane[0].width,
		rot_info->plane[0].height, size);

static struct scatterlist *
add_padding_pages(unsigned int count,
		  struct sg_table *st, struct scatterlist *sg)
	/*
	 * The DE ignores the PTEs for the padding tiles, the sg entry
	 * here is just a convenience to indicate how many padding PTEs
	 * to insert at this spot.
	 */
	sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
	sg_dma_address(sg) = 0;
	sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;

static struct scatterlist *
remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
			      unsigned int offset, unsigned int alignment_pad,
			      unsigned int width, unsigned int height,
			      unsigned int src_stride, unsigned int dst_stride,
			      struct sg_table *st, struct scatterlist *sg,
			      unsigned int *gtt_offset)
	if (!width || !height)

	sg = add_padding_pages(alignment_pad, st, sg);

	for (row = 0; row < height; row++) {
		unsigned int left = width * I915_GTT_PAGE_SIZE;
			unsigned int length;

			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);

			length = min(left, length);

			sg_set_page(sg, NULL, length, 0);
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = length;

			offset += length / I915_GTT_PAGE_SIZE;

		offset += src_stride - width;

		left = (dst_stride - width) * I915_GTT_PAGE_SIZE;

			sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);

	*gtt_offset += alignment_pad + dst_stride * height;

static struct scatterlist *
remap_contiguous_pages(struct drm_i915_gem_object *obj,
		       unsigned int obj_offset,
		       struct sg_table *st, struct scatterlist *sg)
	struct scatterlist *iter;
	unsigned int offset;

	iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);

		len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		count -= len >> PAGE_SHIFT;

		iter = __sg_next(iter);
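
/*
 * As in rotate_pages(), only the DMA addresses are copied into the
 * target sg list here; the page pointers are left NULL since nothing
 * downstream walks struct pages for these views.
 */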

static struct scatterlist *
remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
			       unsigned int obj_offset, unsigned int alignment_pad,
			       struct sg_table *st, struct scatterlist *sg,
			       unsigned int *gtt_offset)
	sg = add_padding_pages(alignment_pad, st, sg);

	sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);

	*gtt_offset += alignment_pad + size;

static struct scatterlist *
remap_color_plane_pages(const struct intel_remapped_info *rem_info,
			struct drm_i915_gem_object *obj,
			struct sg_table *st, struct scatterlist *sg,
			unsigned int *gtt_offset)
	unsigned int alignment_pad = 0;

	if (rem_info->plane_alignment)
		alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;

	if (rem_info->plane[color_plane].linear)
		sg = remap_linear_color_plane_pages(obj,
						    rem_info->plane[color_plane].offset,
						    rem_info->plane[color_plane].size,
		sg = remap_tiled_color_plane_pages(obj,
						   rem_info->plane[color_plane].offset,
						   rem_info->plane[color_plane].width,
						   rem_info->plane[color_plane].height,
						   rem_info->plane[color_plane].src_stride,
						   rem_info->plane[color_plane].dst_stride,

static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info,
		  struct drm_i915_gem_object *obj)
	unsigned int size = intel_remapped_info_size(rem_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int gtt_offset = 0;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);

	ret = sg_alloc_table(st, size, GFP_KERNEL);

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
		sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);

	drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rem_info->plane[0].width,
		rem_info->plane[0].height, size);

	return ERR_PTR(ret);

static noinline struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int count = view->partial.size;

	st = kmalloc(sizeof(*st), GFP_KERNEL);

	ret = sg_alloc_table(st, count, GFP_KERNEL);

	sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);

	i915_sg_trim(st); /* Drop any unused tail entries. */

	return ERR_PTR(ret);

static int
__i915_vma_get_pages(struct i915_vma *vma)
	struct sg_table *pages;

	/*
	 * The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	switch (vma->ggtt_view.type) {
	default:
		GEM_BUG_ON(vma->ggtt_view.type);

	case I915_GGTT_VIEW_NORMAL:
		pages = vma->obj->mm.pages;

	case I915_GGTT_VIEW_ROTATED:
			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);

	case I915_GGTT_VIEW_REMAPPED:
			intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);

	case I915_GGTT_VIEW_PARTIAL:
		pages = intel_partial_pages(&vma->ggtt_view, vma->obj);

	if (IS_ERR(pages)) {
		drm_err(&vma->vm->i915->drm,
			"Failed to get pages for VMA view type %u (%ld)!\n",
			vma->ggtt_view.type, PTR_ERR(pages));
		return PTR_ERR(pages);

I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
	if (atomic_add_unless(&vma->pages_count, 1, 0))

	err = i915_gem_object_pin_pages(vma->obj);

	err = __i915_vma_get_pages(vma);

	vma->page_sizes = vma->obj->mm.page_sizes;
	atomic_inc(&vma->pages_count);

	__i915_gem_object_unpin_pages(vma->obj);

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
	/* We allocate under vma_get_pages, so beware the shrinker */
	GEM_BUG_ON(atomic_read(&vma->pages_count) < count);

	if (atomic_sub_return(count, &vma->pages_count) == 0) {
		if (vma->pages != vma->obj->mm.pages) {
			sg_free_table(vma->pages);

		i915_gem_object_unpin_pages(vma->obj);

I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
	if (atomic_add_unless(&vma->pages_count, -1, 1))

	__vma_put_pages(vma, 1);

static void vma_unbind_pages(struct i915_vma *vma)
	lockdep_assert_held(&vma->vm->mutex);

	/* The upper portion of pages_count is the number of bindings */
	count = atomic_read(&vma->pages_count);
	count >>= I915_VMA_PAGES_BIAS;

	__vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
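
/*
 * pages_count is split in two: the low bits count i915_vma_get_pages()
 * references, while the bits above I915_VMA_PAGES_BIAS count bindings,
 * so dropping every binding releases the matching page references in a
 * single atomic_sub_return().
 */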

int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		    u64 size, u64 alignment, u64 flags)
	struct i915_vma_work *work = NULL;
	struct dma_fence *moving = NULL;
	struct i915_vma_resource *vma_res = NULL;
	intel_wakeref_t wakeref = 0;

	assert_vma_held(vma);

	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

	GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

	/* First try and grab the pin without rebinding the vma */
	if (try_qad_pin(vma, flags))

	err = i915_vma_get_pages(vma);

	if (flags & PIN_GLOBAL)
		wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

	if (flags & vma->vm->bind_async_flags) {
		err = i915_vm_lock_objects(vma->vm, ww);

		work = i915_vma_work();

		err = i915_gem_object_get_moving_fence(vma->obj, &moving);

		dma_fence_work_chain(&work->base, moving);

	/* Allocate enough page directories to cover the used PTEs */
	if (vma->vm->allocate_va_range) {
		err = i915_vm_alloc_pt_stash(vma->vm,

		err = i915_vm_map_pt_stash(vma->vm, &work->stash);

	vma_res = i915_vma_resource_alloc();
	if (IS_ERR(vma_res)) {
		err = PTR_ERR(vma_res);

	/*
	 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
	 *
	 * We conflate the Global GTT with the user's vma when using the
	 * aliasing-ppgtt, but it is still vitally important to try and
	 * keep the use cases distinct. For example, userptr objects are
	 * not allowed inside the Global GTT as that will cause lock
	 * inversions when we have to evict them in the mmu_notifier callbacks -
	 * but they are allowed to be part of the user ppGTT which can never
	 * be mapped. As such we try to give the distinct users of the same
	 * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
	 * and i915_ppgtt separate].
	 *
	 * NB this may cause us to mask real lock inversions -- while the
	 * code is safe today, lockdep may not be able to spot future
	 * inversions.
	 */
	err = mutex_lock_interruptible_nested(&vma->vm->mutex,
					      !(flags & PIN_GLOBAL));
	/* No more allocations allowed now we hold vm->mutex */

	if (unlikely(i915_vma_is_closed(vma))) {

	bound = atomic_read(&vma->flags);
	if (unlikely(bound & I915_VMA_ERROR)) {

	if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
		err = -EAGAIN; /* pins are meant to be fairly temporary */

	if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
		if (!(flags & PIN_VALIDATE))
			__i915_vma_pin(vma);

	err = i915_active_acquire(&vma->active);

	if (!(bound & I915_VMA_BIND_MASK)) {
		err = i915_vma_insert(vma, ww, size, alignment, flags);

		if (i915_is_ggtt(vma->vm))
			__i915_vma_set_map_and_fenceable(vma);

	GEM_BUG_ON(!vma->pages);
	err = i915_vma_bind(vma,
			    vma->obj->cache_level,
			    flags, work, vma_res);

	/* There should only be at most 2 active bindings (user, global) */
	GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
	atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
	list_move_tail(&vma->vm_link, &vma->vm->bound_list);

	if (!(flags & PIN_VALIDATE)) {
		__i915_vma_pin(vma);
		GEM_BUG_ON(!i915_vma_is_pinned(vma));

	GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

	if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
		i915_vma_detach(vma);
		drm_mm_remove_node(&vma->node);

	i915_active_release(&vma->active);
	mutex_unlock(&vma->vm->mutex);

	i915_vma_resource_free(vma_res);

	dma_fence_work_commit_imm(&work->base);

	intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);

	dma_fence_put(moving);

	i915_vma_put_pages(vma);

static void flush_idle_contexts(struct intel_gt *gt)
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		intel_engine_flush_barriers(engine);

	intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);

static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
			   u32 align, unsigned int flags)
	struct i915_address_space *vm = vma->vm;

	err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);

	if (err != -ENOSPC) {
			err = i915_vma_wait_for_bind(vma);
				i915_vma_unpin(vma);

	/* Unlike i915_vma_pin, we don't take no for an answer! */
	flush_idle_contexts(vm->gt);
	if (mutex_lock_interruptible(&vm->mutex) == 0) {
		/*
		 * We pass NULL ww here, as we don't want to unbind
		 * locked objects when called from execbuf when pinning
		 * is removed. This would probably regress badly.
		 */
		i915_gem_evict_vm(vm, NULL);
		mutex_unlock(&vm->mutex);

int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
		  u32 align, unsigned int flags)
	struct i915_gem_ww_ctx _ww;

	GEM_BUG_ON(!i915_vma_is_ggtt(vma));

	if (ww)
		return __i915_ggtt_pin(vma, ww, align, flags);

	lockdep_assert_not_held(&vma->obj->base.resv->lock.base);

	for_i915_gem_ww(&_ww, err, true) {
		err = i915_gem_object_lock(vma->obj, &_ww);
		if (!err)
			err = __i915_ggtt_pin(vma, &_ww, align, flags);
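
/*
 * Presumably (the helper is not shown in this file), for_i915_gem_ww()
 * retries its body on ww-mutex contention (-EDEADLK), backing off and
 * reacquiring the object lock, so __i915_ggtt_pin() may run more than
 * once here before settling.
 */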

static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
	/*
	 * We defer actually closing, unbinding and destroying the VMA until
	 * the next idle point, or if the object is freed in the meantime. By
	 * postponing the unbind, we allow for it to be resurrected by the
	 * client, avoiding the work required to rebind the VMA. This is
	 * advantageous for DRI, where the client/server pass objects
	 * between themselves, temporarily opening a local VMA to the
	 * object, and then closing it again. The same object is then reused
	 * on the next frame (or two, depending on the depth of the swap queue)
	 * causing us to rebind the VMA once more. This ends up being a lot
	 * of wasted work for the steady state.
	 */
	GEM_BUG_ON(i915_vma_is_closed(vma));
	list_add(&vma->closed_link, &gt->closed_vma);

void i915_vma_close(struct i915_vma *vma)
	struct intel_gt *gt = vma->vm->gt;
	unsigned long flags;

	if (i915_vma_is_ggtt(vma))

	GEM_BUG_ON(!atomic_read(&vma->open_count));
	if (atomic_dec_and_lock_irqsave(&vma->open_count,
		__vma_close(vma, gt);
		spin_unlock_irqrestore(&gt->closed_lock, flags);

static void __i915_vma_remove_closed(struct i915_vma *vma)
	list_del_init(&vma->closed_link);

void i915_vma_reopen(struct i915_vma *vma)
	struct intel_gt *gt = vma->vm->gt;

	spin_lock_irq(&gt->closed_lock);
	if (i915_vma_is_closed(vma))
		__i915_vma_remove_closed(vma);
	spin_unlock_irq(&gt->closed_lock);

static void force_unbind(struct i915_vma *vma)
	if (!drm_mm_node_allocated(&vma->node))

	atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
	WARN_ON(__i915_vma_unbind(vma));
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

static void release_references(struct i915_vma *vma, bool vm_ddestroy)
	struct drm_i915_gem_object *obj = vma->obj;
	struct intel_gt *gt = vma->vm->gt;

	GEM_BUG_ON(i915_vma_is_active(vma));

	spin_lock(&obj->vma.lock);
	list_del(&vma->obj_link);
	if (!RB_EMPTY_NODE(&vma->obj_node))
		rb_erase(&vma->obj_node, &obj->vma.tree);

	spin_unlock(&obj->vma.lock);

	spin_lock_irq(&gt->closed_lock);
	__i915_vma_remove_closed(vma);
	spin_unlock_irq(&gt->closed_lock);

	if (vm_ddestroy)
		i915_vm_resv_put(vma->vm);

	i915_active_fini(&vma->active);
	GEM_WARN_ON(vma->resource);

/**
 * i915_vma_destroy_locked - Remove all weak references to the vma and put
 * the initial reference.
 *
 * This function should be called when it's decided the vma isn't needed
 * anymore. The caller must assure that it doesn't race with another lookup
 * plus destroy, typically by taking an appropriate reference.
 *
 * Current callsites are
 * - __i915_gem_object_pages_fini()
 * - __i915_vm_close() - Blocks the above function by taking a reference on
 * - __i915_vma_parked() - Blocks the above functions by taking a reference
 *   on the vm and a reference on the object. Also takes the object lock so
 *   destruction from __i915_vma_parked() can be blocked by holding the
 *   object lock. Since the object lock is only allowed from within i915 with
 *   an object refcount, holding the object lock also implicitly blocks the
 *   vma freeing from __i915_gem_object_pages_fini().
 *
 * Because of locks taken during destruction, a vma is also guaranteed to
 * stay alive while the following locks are held if it was looked up while
 * holding one of the locks:
 */
void i915_vma_destroy_locked(struct i915_vma *vma)
	lockdep_assert_held(&vma->vm->mutex);

	list_del_init(&vma->vm_link);
	release_references(vma, false);

void i915_vma_destroy(struct i915_vma *vma)
	mutex_lock(&vma->vm->mutex);
	list_del_init(&vma->vm_link);
	vm_ddestroy = vma->vm_ddestroy;
	vma->vm_ddestroy = false;
	mutex_unlock(&vma->vm->mutex);
	release_references(vma, vm_ddestroy);

void i915_vma_parked(struct intel_gt *gt)
	struct i915_vma *vma, *next;
	LIST_HEAD(closed);

	spin_lock_irq(&gt->closed_lock);
	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		/* XXX All to avoid keeping a reference on i915_vma itself */

		if (!kref_get_unless_zero(&obj->base.refcount))

		if (!i915_vm_tryget(vm)) {
			i915_gem_object_put(obj);

		list_move(&vma->closed_link, &closed);
	spin_unlock_irq(&gt->closed_lock);

	/* As the GT is held idle, no vma can be reopened as we destroy them */
	list_for_each_entry_safe(vma, next, &closed, closed_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_address_space *vm = vma->vm;

		if (i915_gem_object_trylock(obj, NULL)) {
			INIT_LIST_HEAD(&vma->closed_link);
			i915_vma_destroy(vma);
			i915_gem_object_unlock(obj);
		} else {
			spin_lock_irq(&gt->closed_lock);
			list_add(&vma->closed_link, &gt->closed_vma);
			spin_unlock_irq(&gt->closed_lock);

		i915_gem_object_put(obj);

static void __i915_vma_iounmap(struct i915_vma *vma)
	GEM_BUG_ON(i915_vma_is_pinned(vma));

	if (vma->iomap == NULL)

	io_mapping_unmap(vma->iomap);

void i915_vma_revoke_mmap(struct i915_vma *vma)
	struct drm_vma_offset_node *node;

	if (!i915_vma_has_userfault(vma))

	GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
	GEM_BUG_ON(!vma->obj->userfault_count);

	node = &vma->mmo->vma_node;
	vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
	unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
			    drm_vma_node_offset_addr(node) + vma_offset,

	i915_vma_unset_userfault(vma);
	if (!--vma->obj->userfault_count)
		list_del(&vma->obj->userfault_link);

static int
__i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
	return __i915_request_await_exclusive(rq, &vma->active);

static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
	/* Wait for the vma to be bound before we start! */
	err = __i915_request_await_bind(rq, vma);

	return i915_active_add_request(&vma->active, rq);

int _i915_vma_move_to_active(struct i915_vma *vma,
			     struct i915_request *rq,
			     struct dma_fence *fence,
			     unsigned int flags)
	struct drm_i915_gem_object *obj = vma->obj;

	assert_object_held(obj);

	GEM_BUG_ON(!vma->pages);

	err = __i915_vma_move_to_active(vma, rq);

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = __intel_frontbuffer_get(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);

		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
			err = dma_resv_reserve_fences(vma->obj->base.resv, 1);

			dma_resv_add_fence(vma->obj->base.resv, fence,
					   DMA_RESV_USAGE_WRITE);
			obj->write_domain = I915_GEM_DOMAIN_RENDER;
			obj->read_domains = 0;
	} else {
		if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
			err = dma_resv_reserve_fences(vma->obj->base.resv, 1);

			dma_resv_add_fence(vma->obj->base.resv, fence,
					   DMA_RESV_USAGE_READ);
			obj->write_domain = 0;

	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
		i915_active_add_request(&vma->fence->active, rq);

	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));

struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
	struct i915_vma_resource *vma_res = vma->resource;
	struct dma_fence *unbind_fence;

	GEM_BUG_ON(i915_vma_is_pinned(vma));
	assert_vma_held_evict(vma);

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict nature
		 * of those indirect writes they may end up referencing the GGTT PTE
		 *
		 * Note that we may be concurrently poking at the GGTT_WRITE
		 * bit from set-domain, as we mark all GGTT vma associated
		 * with an object. We know this is for another vma, as we
		 * are currently unbinding this one -- so if this vma will be
		 * reused, it will be refaulted and have its dirty bit set
		 * before the next write.
		 */
		i915_vma_flush_writes(vma);

		/* release the fence reg _after_ flushing */
		i915_vma_revoke_fence(vma);

		__i915_vma_iounmap(vma);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));

	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	/* Object backend must be async capable. */
	GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);

	/* If vm is not open, unbind is a nop. */
	vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
		kref_read(&vma->vm->ref);
	vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
		vma->vm->skip_pte_rewrite;
	trace_i915_vma_unbind(vma);

	unbind_fence = i915_vma_resource_unbind(vma_res);
	vma->resource = NULL;

	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),

	i915_vma_detach(vma);

	if (!async && unbind_fence) {
		dma_fence_wait(unbind_fence, false);
		dma_fence_put(unbind_fence);
		unbind_fence = NULL;

	/*
	 * Binding itself may not have completed until the unbind fence signals,
	 * so don't drop the pages until that happens, unless the resource is
	 */
	vma_unbind_pages(vma);
	return unbind_fence;
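
/*
 * The fence returned above signals once the PTEs have been cleared:
 * synchronous callers wait on it right here, while the async path hands
 * it back so it can be added to the object's dma_resv, as done in
 * i915_vma_unbind_async().
 */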

int __i915_vma_unbind(struct i915_vma *vma)
	lockdep_assert_held(&vma->vm->mutex);
	assert_vma_held_evict(vma);

	if (!drm_mm_node_allocated(&vma->node))

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);

	GEM_BUG_ON(i915_vma_is_active(vma));
	__i915_vma_evict(vma, false);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */

static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
	struct dma_fence *fence;

	lockdep_assert_held(&vma->vm->mutex);

	if (!drm_mm_node_allocated(&vma->node))

	if (i915_vma_is_pinned(vma) ||
	    &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
		return ERR_PTR(-EAGAIN);

	/*
	 * We probably need to replace this with awaiting the fences of the
	 * object's dma_resv when the vma active goes away. When doing that
	 * we need to be careful to not add the vma_resource unbind fence
	 * immediately to the object's dma_resv, because then unbinding
	 * the next vma from the object, in case there are many, will
	 * actually await the unbinding of the previous vmas, which is
	 */
	if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
				       I915_ACTIVE_AWAIT_EXCL |
				       I915_ACTIVE_AWAIT_ACTIVE) < 0) {
		return ERR_PTR(-EBUSY);

	fence = __i915_vma_evict(vma, true);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */

int i915_vma_unbind(struct i915_vma *vma)
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;

	assert_object_held_shared(vma->obj);

	/* Optimistic wait before taking the mutex */
	err = i915_vma_sync(vma);

	if (!drm_mm_node_allocated(&vma->node))

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		/* XXX not always required: nop_clear_range */
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);

int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
	struct drm_i915_gem_object *obj = vma->obj;
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	struct dma_fence *fence;

	/*
	 * We need the dma-resv lock since we add the
	 * unbind fence to the dma-resv object.
	 */
	assert_object_held(obj);

	if (!drm_mm_node_allocated(&vma->node))

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");

	err = dma_resv_reserve_fences(obj->base.resv, 1);

	/*
	 * It would be great if we could grab this wakeref from the
	 * async unbind work if needed, but we can't because it uses
	 * kmalloc and it's in the dma-fence signalling critical path.
	 */
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	if (trylock_vm && !mutex_trylock(&vm->mutex)) {
	} else if (!trylock_vm) {
		err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);

	fence = __i915_vma_unbind_async(vma);
	mutex_unlock(&vm->mutex);
	if (IS_ERR_OR_NULL(fence)) {
		err = PTR_ERR_OR_ZERO(fence);

	dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ);
	dma_fence_put(fence);

	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);

int i915_vma_unbind_unlocked(struct i915_vma *vma)
	i915_gem_object_lock(vma->obj, NULL);
	err = i915_vma_unbind(vma);
	i915_gem_object_unlock(vma->obj);

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
	i915_gem_object_make_unshrinkable(vma->obj);

void i915_vma_make_shrinkable(struct i915_vma *vma)
	i915_gem_object_make_shrinkable(vma->obj);

void i915_vma_make_purgeable(struct i915_vma *vma)
	i915_gem_object_make_purgeable(vma->obj);

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

void i915_vma_module_exit(void)
	kmem_cache_destroy(slab_vmas);

int __init i915_vma_module_init(void)
	slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);