drm/i915: Preallocate stashes for vma page-directories
drivers/gpu/drm/i915/i915_vma.c  [linux-2.6-microblaze.git]
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/sched/mm.h>
26 #include <drm/drm_gem.h>
27
28 #include "display/intel_frontbuffer.h"
29
30 #include "gt/intel_engine.h"
31 #include "gt/intel_engine_heartbeat.h"
32 #include "gt/intel_gt.h"
33 #include "gt/intel_gt_requests.h"
34
35 #include "i915_drv.h"
36 #include "i915_globals.h"
37 #include "i915_sw_fence_work.h"
38 #include "i915_trace.h"
39 #include "i915_vma.h"
40
41 static struct i915_global_vma {
42         struct i915_global base;
43         struct kmem_cache *slab_vmas;
44 } global;
45
46 struct i915_vma *i915_vma_alloc(void)
47 {
48         return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
49 }
50
51 void i915_vma_free(struct i915_vma *vma)
52 {
53         return kmem_cache_free(global.slab_vmas, vma);
54 }
55
56 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
57
58 #include <linux/stackdepot.h>
59
60 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
61 {
62         unsigned long *entries;
63         unsigned int nr_entries;
64         char buf[512];
65
66         if (!vma->node.stack) {
67                 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
68                                  vma->node.start, vma->node.size, reason);
69                 return;
70         }
71
72         nr_entries = stack_depot_fetch(vma->node.stack, &entries);
73         stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
74         DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
75                          vma->node.start, vma->node.size, reason, buf);
76 }
77
78 #else
79
80 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
81 {
82 }
83
84 #endif
85
86 static inline struct i915_vma *active_to_vma(struct i915_active *ref)
87 {
88         return container_of(ref, typeof(struct i915_vma), active);
89 }
90
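/*
 * The two callbacks below back the vma's i915_active tracker:
 * __i915_vma_active() takes an extra reference on the vma when the first
 * request is added, and __i915_vma_retire() drops it again once all work
 * has completed, so an active vma cannot disappear beneath the GPU.
 */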
91 static int __i915_vma_active(struct i915_active *ref)
92 {
93         return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
94 }
95
96 __i915_active_call
97 static void __i915_vma_retire(struct i915_active *ref)
98 {
99         i915_vma_put(active_to_vma(ref));
100 }
101
102 static struct i915_vma *
103 vma_create(struct drm_i915_gem_object *obj,
104            struct i915_address_space *vm,
105            const struct i915_ggtt_view *view)
106 {
107         struct i915_vma *pos = ERR_PTR(-E2BIG);
108         struct i915_vma *vma;
109         struct rb_node *rb, **p;
110
111         /* The aliasing_ppgtt should never be used directly! */
112         GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
113
114         vma = i915_vma_alloc();
115         if (vma == NULL)
116                 return ERR_PTR(-ENOMEM);
117
118         kref_init(&vma->ref);
119         mutex_init(&vma->pages_mutex);
120         vma->vm = i915_vm_get(vm);
121         vma->ops = &vm->vma_ops;
122         vma->obj = obj;
123         vma->resv = obj->base.resv;
124         vma->size = obj->base.size;
125         vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
126
127         i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
128
129         /* Declare ourselves safe for use inside shrinkers */
130         if (IS_ENABLED(CONFIG_LOCKDEP)) {
131                 fs_reclaim_acquire(GFP_KERNEL);
132                 might_lock(&vma->active.mutex);
133                 fs_reclaim_release(GFP_KERNEL);
134         }
135
136         INIT_LIST_HEAD(&vma->closed_link);
137
138         if (view && view->type != I915_GGTT_VIEW_NORMAL) {
139                 vma->ggtt_view = *view;
140                 if (view->type == I915_GGTT_VIEW_PARTIAL) {
141                         GEM_BUG_ON(range_overflows_t(u64,
142                                                      view->partial.offset,
143                                                      view->partial.size,
144                                                      obj->base.size >> PAGE_SHIFT));
145                         vma->size = view->partial.size;
146                         vma->size <<= PAGE_SHIFT;
147                         GEM_BUG_ON(vma->size > obj->base.size);
148                 } else if (view->type == I915_GGTT_VIEW_ROTATED) {
149                         vma->size = intel_rotation_info_size(&view->rotated);
150                         vma->size <<= PAGE_SHIFT;
151                 } else if (view->type == I915_GGTT_VIEW_REMAPPED) {
152                         vma->size = intel_remapped_info_size(&view->remapped);
153                         vma->size <<= PAGE_SHIFT;
154                 }
155         }
156
157         if (unlikely(vma->size > vm->total))
158                 goto err_vma;
159
160         GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
161
162         spin_lock(&obj->vma.lock);
163
164         if (i915_is_ggtt(vm)) {
165                 if (unlikely(overflows_type(vma->size, u32)))
166                         goto err_unlock;
167
168                 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
169                                                       i915_gem_object_get_tiling(obj),
170                                                       i915_gem_object_get_stride(obj));
171                 if (unlikely(vma->fence_size < vma->size || /* overflow */
172                              vma->fence_size > vm->total))
173                         goto err_unlock;
174
175                 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
176
177                 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
178                                                                 i915_gem_object_get_tiling(obj),
179                                                                 i915_gem_object_get_stride(obj));
180                 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
181
182                 __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
183         }
184
185         rb = NULL;
186         p = &obj->vma.tree.rb_node;
187         while (*p) {
188                 long cmp;
189
190                 rb = *p;
191                 pos = rb_entry(rb, struct i915_vma, obj_node);
192
193                 /*
194                  * If the view already exists in the tree, another thread
195                  * already created a matching vma, so return the older instance
196                  * and dispose of ours.
197                  */
198                 cmp = i915_vma_compare(pos, vm, view);
199                 if (cmp < 0)
200                         p = &rb->rb_right;
201                 else if (cmp > 0)
202                         p = &rb->rb_left;
203                 else
204                         goto err_unlock;
205         }
206         rb_link_node(&vma->obj_node, rb, p);
207         rb_insert_color(&vma->obj_node, &obj->vma.tree);
208
209         if (i915_vma_is_ggtt(vma))
210                 /*
211                  * We put the GGTT vma at the start of the vma-list, followed
212                  * by the ppGTT vma. This allows us to break early when
213                  * iterating over only the GGTT vma for an object, see
214                  * for_each_ggtt_vma()
215                  */
216                 list_add(&vma->obj_link, &obj->vma.list);
217         else
218                 list_add_tail(&vma->obj_link, &obj->vma.list);
219
220         spin_unlock(&obj->vma.lock);
221
222         return vma;
223
224 err_unlock:
225         spin_unlock(&obj->vma.lock);
226 err_vma:
227         i915_vm_put(vm);
228         i915_vma_free(vma);
229         return pos;
230 }
231
232 static struct i915_vma *
233 vma_lookup(struct drm_i915_gem_object *obj,
234            struct i915_address_space *vm,
235            const struct i915_ggtt_view *view)
236 {
237         struct rb_node *rb;
238
239         rb = obj->vma.tree.rb_node;
240         while (rb) {
241                 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
242                 long cmp;
243
244                 cmp = i915_vma_compare(vma, vm, view);
245                 if (cmp == 0)
246                         return vma;
247
248                 if (cmp < 0)
249                         rb = rb->rb_right;
250                 else
251                         rb = rb->rb_left;
252         }
253
254         return NULL;
255 }
256
257 /**
258  * i915_vma_instance - return the singleton instance of the VMA
259  * @obj: parent &struct drm_i915_gem_object to be mapped
260  * @vm: address space in which the mapping is located
261  * @view: additional mapping requirements
262  *
263  * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
264  * the same @view characteristics. If a match is not found, one is created.
265  * Once created, the VMA is kept until either the object is freed, or the
266  * address space is closed.
267  *
268  * Returns the vma, or an error pointer.
269  */
270 struct i915_vma *
271 i915_vma_instance(struct drm_i915_gem_object *obj,
272                   struct i915_address_space *vm,
273                   const struct i915_ggtt_view *view)
274 {
275         struct i915_vma *vma;
276
277         GEM_BUG_ON(view && !i915_is_ggtt(vm));
278         GEM_BUG_ON(!atomic_read(&vm->open));
279
280         spin_lock(&obj->vma.lock);
281         vma = vma_lookup(obj, vm, view);
282         spin_unlock(&obj->vma.lock);
283
284         /* vma_create() will resolve the race if another creates the vma */
285         if (unlikely(!vma))
286                 vma = vma_create(obj, vm, view);
287
288         GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
289         return vma;
290 }
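
/*
 * Illustrative (hypothetical) caller of the lookup above, assuming the
 * caller already holds a reference on @obj and wants a pinned GGTT binding;
 * ggtt, err and the surrounding error handling are placeholders:
 *
 *	vma = i915_vma_instance(obj, &ggtt->vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
 *	if (err)
 *		return err;
 *	...
 *	i915_vma_unpin(vma);
 */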
291
292 struct i915_vma_work {
293         struct dma_fence_work base;
294         struct i915_address_space *vm;
295         struct i915_vm_pt_stash stash;
296         struct i915_vma *vma;
297         struct drm_i915_gem_object *pinned;
298         struct i915_sw_dma_fence_cb cb;
299         enum i915_cache_level cache_level;
300         unsigned int flags;
301 };
302
303 static int __vma_bind(struct dma_fence_work *work)
304 {
305         struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
306         struct i915_vma *vma = vw->vma;
307
308         vma->ops->bind_vma(vw->vm, &vw->stash,
309                            vma, vw->cache_level, vw->flags);
310         return 0;
311 }
312
313 static void __vma_release(struct dma_fence_work *work)
314 {
315         struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
316
317         if (vw->pinned)
318                 __i915_gem_object_unpin_pages(vw->pinned);
319
320         i915_vm_free_pt_stash(vw->vm, &vw->stash);
321         i915_vm_put(vw->vm);
322 }
323
324 static const struct dma_fence_work_ops bind_ops = {
325         .name = "bind",
326         .work = __vma_bind,
327         .release = __vma_release,
328 };
329
330 struct i915_vma_work *i915_vma_work(void)
331 {
332         struct i915_vma_work *vw;
333
334         vw = kzalloc(sizeof(*vw), GFP_KERNEL);
335         if (!vw)
336                 return NULL;
337
338         dma_fence_work_init(&vw->base, &bind_ops);
339         vw->base.dma.error = -EAGAIN; /* disable the worker by default */
340
341         return vw;
342 }
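
/*
 * The worker above is created disabled (dma.error == -EAGAIN) and is only
 * enabled once i915_vma_pin() has filled it in. In particular, the caller
 * preallocates the page-directory stash (work->stash) with
 * i915_vm_alloc_pt_stash() before vm->mutex is taken, so __vma_bind() never
 * has to allocate while holding the lock; any unused stash is returned in
 * __vma_release().
 */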
343
344 int i915_vma_wait_for_bind(struct i915_vma *vma)
345 {
346         int err = 0;
347
348         if (rcu_access_pointer(vma->active.excl.fence)) {
349                 struct dma_fence *fence;
350
351                 rcu_read_lock();
352                 fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
353                 rcu_read_unlock();
354                 if (fence) {
355                         err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
356                         dma_fence_put(fence);
357                 }
358         }
359
360         return err;
361 }
362
363 /**
364  * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
365  * @vma: VMA to map
366  * @cache_level: mapping cache level
367  * @flags: flags like global or local mapping
368  * @work: preallocated worker for allocating and binding the PTE
369  *
370  * DMA addresses are taken from the scatter-gather table of this object (or of
371  * this VMA in case of non-default GGTT views) and PTE entries set up.
372  * Note that DMA addresses are also the only part of the SG table we care about.
373  */
374 int i915_vma_bind(struct i915_vma *vma,
375                   enum i915_cache_level cache_level,
376                   u32 flags,
377                   struct i915_vma_work *work)
378 {
379         u32 bind_flags;
380         u32 vma_flags;
381
382         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
383         GEM_BUG_ON(vma->size > vma->node.size);
384
385         if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
386                                               vma->node.size,
387                                               vma->vm->total)))
388                 return -ENODEV;
389
390         if (GEM_DEBUG_WARN_ON(!flags))
391                 return -EINVAL;
392
393         bind_flags = flags;
394         bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
395
396         vma_flags = atomic_read(&vma->flags);
397         vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
398
399         bind_flags &= ~vma_flags;
400         if (bind_flags == 0)
401                 return 0;
402
403         GEM_BUG_ON(!vma->pages);
404
405         trace_i915_vma_bind(vma, bind_flags);
406         if (work && bind_flags & vma->vm->bind_async_flags) {
407                 struct dma_fence *prev;
408
409                 work->vma = vma;
410                 work->cache_level = cache_level;
411                 work->flags = bind_flags;
412
413                 /*
414                  * Note we only want to chain up to the migration fence on
415                  * the pages (not the object itself). As we don't track that,
416                  * yet, we have to use the exclusive fence instead.
417                  *
418                  * Also note that we do not want to track the async vma as
419                  * part of the obj->resv->excl_fence as it only affects
420                  * execution and not content or object's backing store lifetime.
421                  */
422                 prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
423                 if (prev) {
424                         __i915_sw_fence_await_dma_fence(&work->base.chain,
425                                                         prev,
426                                                         &work->cb);
427                         dma_fence_put(prev);
428                 }
429
430                 work->base.dma.error = 0; /* enable the queue_work() */
431
432                 if (vma->obj) {
433                         __i915_gem_object_pin_pages(vma->obj);
434                         work->pinned = vma->obj;
435                 }
436         } else {
437                 vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
438         }
439
440         atomic_or(bind_flags, &vma->flags);
441         return 0;
442 }
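
/*
 * Note on the two paths above: when a preallocated @work is supplied and the
 * requested bind intersects vm->bind_async_flags, the PTE writes are deferred
 * to the dma_fence_work and chained behind any previous exclusive fence on
 * the vma; otherwise bind_vma() is invoked synchronously with a NULL stash.
 */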
443
444 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
445 {
446         void __iomem *ptr;
447         int err;
448
449         if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
450                 err = -ENODEV;
451                 goto err;
452         }
453
454         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
455         GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
456
457         ptr = READ_ONCE(vma->iomap);
458         if (ptr == NULL) {
459                 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
460                                         vma->node.start,
461                                         vma->node.size);
462                 if (ptr == NULL) {
463                         err = -ENOMEM;
464                         goto err;
465                 }
466
467                 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
468                         io_mapping_unmap(ptr);
469                         ptr = vma->iomap;
470                 }
471         }
472
473         __i915_vma_pin(vma);
474
475         err = i915_vma_pin_fence(vma);
476         if (err)
477                 goto err_unpin;
478
479         i915_vma_set_ggtt_write(vma);
480
481         /* NB Access through the GTT requires the device to be awake. */
482         return ptr;
483
484 err_unpin:
485         __i915_vma_unpin(vma);
486 err:
487         return IO_ERR_PTR(err);
488 }
489
490 void i915_vma_flush_writes(struct i915_vma *vma)
491 {
492         if (i915_vma_unset_ggtt_write(vma))
493                 intel_gt_flush_ggtt_writes(vma->vm->gt);
494 }
495
496 void i915_vma_unpin_iomap(struct i915_vma *vma)
497 {
498         GEM_BUG_ON(vma->iomap == NULL);
499
500         i915_vma_flush_writes(vma);
501
502         i915_vma_unpin_fence(vma);
503         i915_vma_unpin(vma);
504 }
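
/*
 * Hypothetical usage sketch for the two iomap helpers above (error handling
 * trimmed; value and offset are placeholders): map a GGTT-bound vma through
 * the mappable aperture for CPU writes, then release it again.
 *
 *	void __iomem *ptr = i915_vma_pin_iomap(vma);
 *	if (IS_ERR(ptr))
 *		return PTR_ERR(ptr);
 *
 *	writel(value, ptr + offset);
 *	i915_vma_unpin_iomap(vma);
 */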
505
506 void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
507 {
508         struct i915_vma *vma;
509         struct drm_i915_gem_object *obj;
510
511         vma = fetch_and_zero(p_vma);
512         if (!vma)
513                 return;
514
515         obj = vma->obj;
516         GEM_BUG_ON(!obj);
517
518         i915_vma_unpin(vma);
519
520         if (flags & I915_VMA_RELEASE_MAP)
521                 i915_gem_object_unpin_map(obj);
522
523         i915_gem_object_put(obj);
524 }
525
526 bool i915_vma_misplaced(const struct i915_vma *vma,
527                         u64 size, u64 alignment, u64 flags)
528 {
529         if (!drm_mm_node_allocated(&vma->node))
530                 return false;
531
532         if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
533                 return true;
534
535         if (vma->node.size < size)
536                 return true;
537
538         GEM_BUG_ON(alignment && !is_power_of_2(alignment));
539         if (alignment && !IS_ALIGNED(vma->node.start, alignment))
540                 return true;
541
542         if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
543                 return true;
544
545         if (flags & PIN_OFFSET_BIAS &&
546             vma->node.start < (flags & PIN_OFFSET_MASK))
547                 return true;
548
549         if (flags & PIN_OFFSET_FIXED &&
550             vma->node.start != (flags & PIN_OFFSET_MASK))
551                 return true;
552
553         return false;
554 }
555
556 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
557 {
558         bool mappable, fenceable;
559
560         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
561         GEM_BUG_ON(!vma->fence_size);
562
563         fenceable = (vma->node.size >= vma->fence_size &&
564                      IS_ALIGNED(vma->node.start, vma->fence_alignment));
565
566         mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
567
568         if (mappable && fenceable)
569                 set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
570         else
571                 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
572 }
573
574 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
575 {
576         struct drm_mm_node *node = &vma->node;
577         struct drm_mm_node *other;
578
579         /*
580          * On some machines we have to be careful when putting differing types
581          * of snoopable memory together to avoid the prefetcher crossing memory
582          * domains and dying. During vm initialisation, we decide whether or not
583          * these constraints apply and set the drm_mm.color_adjust
584          * appropriately.
585          */
586         if (!i915_vm_has_cache_coloring(vma->vm))
587                 return true;
588
589         /* Only valid to be called on an already inserted vma */
590         GEM_BUG_ON(!drm_mm_node_allocated(node));
591         GEM_BUG_ON(list_empty(&node->node_list));
592
593         other = list_prev_entry(node, node_list);
594         if (i915_node_color_differs(other, color) &&
595             !drm_mm_hole_follows(other))
596                 return false;
597
598         other = list_next_entry(node, node_list);
599         if (i915_node_color_differs(other, color) &&
600             !drm_mm_hole_follows(node))
601                 return false;
602
603         return true;
604 }
605
606 /**
607  * i915_vma_insert - finds a slot for the vma in its address space
608  * @vma: the vma
609  * @size: requested size in bytes (can be larger than the VMA)
610  * @alignment: required alignment
611  * @flags: mask of PIN_* flags to use
612  *
613  * First we try to allocate some free space that meets the requirements for
614  * the VMA. Failing that, if the flags permit, it will evict an old VMA,
615  * preferably the oldest idle entry to make room for the new VMA.
616  *
617  * Returns:
618  * 0 on success, negative error code otherwise.
619  */
620 static int
621 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
622 {
623         unsigned long color;
624         u64 start, end;
625         int ret;
626
627         GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
628         GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
629
630         size = max(size, vma->size);
631         alignment = max(alignment, vma->display_alignment);
632         if (flags & PIN_MAPPABLE) {
633                 size = max_t(typeof(size), size, vma->fence_size);
634                 alignment = max_t(typeof(alignment),
635                                   alignment, vma->fence_alignment);
636         }
637
638         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
639         GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
640         GEM_BUG_ON(!is_power_of_2(alignment));
641
642         start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
643         GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
644
645         end = vma->vm->total;
646         if (flags & PIN_MAPPABLE)
647                 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
648         if (flags & PIN_ZONE_4G)
649                 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
650         GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
651
652         /* If binding the object/GGTT view requires more space than the entire
653          * aperture has, reject it early before evicting everything in a vain
654          * attempt to find space.
655          */
656         if (size > end) {
657                 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
658                           size, flags & PIN_MAPPABLE ? "mappable" : "total",
659                           end);
660                 return -ENOSPC;
661         }
662
663         color = 0;
664         if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
665                 color = vma->obj->cache_level;
666
667         if (flags & PIN_OFFSET_FIXED) {
668                 u64 offset = flags & PIN_OFFSET_MASK;
669                 if (!IS_ALIGNED(offset, alignment) ||
670                     range_overflows(offset, size, end))
671                         return -EINVAL;
672
673                 ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
674                                            size, offset, color,
675                                            flags);
676                 if (ret)
677                         return ret;
678         } else {
679                 /*
680                  * We only support huge gtt pages through the 48b PPGTT,
681                  * however we also don't want to force any alignment for
682                  * objects which need to be tightly packed into the low 32bits.
683                  *
684                  * Note that we assume that the GGTT is limited to 4GiB for the
685                  * foreseeable future. See also i915_ggtt_offset().
686                  */
687                 if (upper_32_bits(end - 1) &&
688                     vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
689                         /*
690                          * We can't mix 64K and 4K PTEs in the same page-table
691                          * (2M block), and so to avoid the ugliness and
692                          * complexity of coloring we opt for just aligning 64K
693                          * objects to 2M.
694                          */
695                         u64 page_alignment =
696                                 rounddown_pow_of_two(vma->page_sizes.sg |
697                                                      I915_GTT_PAGE_SIZE_2M);
698
699                         /*
700                          * Check we don't expand for the limited Global GTT
701                          * (mappable aperture is even more precious!). This
702                          * also checks that we exclude the aliasing-ppgtt.
703                          */
704                         GEM_BUG_ON(i915_vma_is_ggtt(vma));
705
706                         alignment = max(alignment, page_alignment);
707
708                         if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
709                                 size = round_up(size, I915_GTT_PAGE_SIZE_2M);
710                 }
711
712                 ret = i915_gem_gtt_insert(vma->vm, &vma->node,
713                                           size, alignment, color,
714                                           start, end, flags);
715                 if (ret)
716                         return ret;
717
718                 GEM_BUG_ON(vma->node.start < start);
719                 GEM_BUG_ON(vma->node.start + vma->node.size > end);
720         }
721         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
722         GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
723
724         list_add_tail(&vma->vm_link, &vma->vm->bound_list);
725
726         return 0;
727 }
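
/*
 * As the code above shows, PIN_OFFSET_BIAS and PIN_OFFSET_FIXED carry their
 * offset in the same u64 as the other PIN_* bits, masked by PIN_OFFSET_MASK.
 * A hypothetical fixed placement therefore looks like
 * i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_OFFSET_FIXED | offset), with
 * offset page-aligned and below the address space limit.
 */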
728
729 static void
730 i915_vma_detach(struct i915_vma *vma)
731 {
732         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
733         GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
734
735         /*
736          * And finally now the object is completely decoupled from this
737          * vma, we can drop its hold on the backing storage and allow
738          * it to be reaped by the shrinker.
739          */
740         list_del(&vma->vm_link);
741 }
742
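/*
 * try_qad_pin() is the quick-and-dirty pin attempt used by i915_vma_pin():
 * if the vma is already bound with all of the requested flags and free of
 * errors, the pin count is raised with a lock-free cmpxchg; if it is bound
 * but currently unpinned, the checks are repeated under vm->mutex to avoid
 * racing against a concurrent i915_vma_unbind().
 */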
743 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
744 {
745         unsigned int bound;
746         bool pinned = true;
747
748         bound = atomic_read(&vma->flags);
749         do {
750                 if (unlikely(flags & ~bound))
751                         return false;
752
753                 if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
754                         return false;
755
756                 if (!(bound & I915_VMA_PIN_MASK))
757                         goto unpinned;
758
759                 GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
760         } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
761
762         return true;
763
764 unpinned:
765         /*
766          * If pin_count==0, but we are bound, check under the lock to avoid
767          * racing with a concurrent i915_vma_unbind().
768          */
769         mutex_lock(&vma->vm->mutex);
770         do {
771                 if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
772                         pinned = false;
773                         break;
774                 }
775
776                 if (unlikely(flags & ~bound)) {
777                         pinned = false;
778                         break;
779                 }
780         } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
781         mutex_unlock(&vma->vm->mutex);
782
783         return pinned;
784 }
785
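/*
 * vma->pages_count below is split into two fields: the low bits count
 * transient vma_get_pages()/vma_put_pages() references, while the bits above
 * I915_VMA_PAGES_BIAS count bindings (see vma_unbind_pages() and the
 * I915_VMA_PAGES_ACTIVE increment in i915_vma_pin()).
 */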
786 static int vma_get_pages(struct i915_vma *vma)
787 {
788         int err = 0;
789
790         if (atomic_add_unless(&vma->pages_count, 1, 0))
791                 return 0;
792
793         /* Allocations ahoy! */
794         if (mutex_lock_interruptible(&vma->pages_mutex))
795                 return -EINTR;
796
797         if (!atomic_read(&vma->pages_count)) {
798                 if (vma->obj) {
799                         err = i915_gem_object_pin_pages(vma->obj);
800                         if (err)
801                                 goto unlock;
802                 }
803
804                 err = vma->ops->set_pages(vma);
805                 if (err) {
806                         if (vma->obj)
807                                 i915_gem_object_unpin_pages(vma->obj);
808                         goto unlock;
809                 }
810         }
811         atomic_inc(&vma->pages_count);
812
813 unlock:
814         mutex_unlock(&vma->pages_mutex);
815
816         return err;
817 }
818
819 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
820 {
821         /* We allocate under vma_get_pages, so beware the shrinker */
822         mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
823         GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
824         if (atomic_sub_return(count, &vma->pages_count) == 0) {
825                 vma->ops->clear_pages(vma);
826                 GEM_BUG_ON(vma->pages);
827                 if (vma->obj)
828                         i915_gem_object_unpin_pages(vma->obj);
829         }
830         mutex_unlock(&vma->pages_mutex);
831 }
832
833 static void vma_put_pages(struct i915_vma *vma)
834 {
835         if (atomic_add_unless(&vma->pages_count, -1, 1))
836                 return;
837
838         __vma_put_pages(vma, 1);
839 }
840
841 static void vma_unbind_pages(struct i915_vma *vma)
842 {
843         unsigned int count;
844
845         lockdep_assert_held(&vma->vm->mutex);
846
847         /* The upper portion of pages_count is the number of bindings */
848         count = atomic_read(&vma->pages_count);
849         count >>= I915_VMA_PAGES_BIAS;
850         GEM_BUG_ON(!count);
851
852         __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
853 }
854
855 int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
856 {
857         struct i915_vma_work *work = NULL;
858         intel_wakeref_t wakeref = 0;
859         unsigned int bound;
860         int err;
861
862         BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
863         BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
864
865         GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
866
867         /* First try and grab the pin without rebinding the vma */
868         if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
869                 return 0;
870
871         err = vma_get_pages(vma);
872         if (err)
873                 return err;
874
875         if (flags & vma->vm->bind_async_flags) {
876                 work = i915_vma_work();
877                 if (!work) {
878                         err = -ENOMEM;
879                         goto err_pages;
880                 }
881
882                 work->vm = i915_vm_get(vma->vm);
883
884                 /* Allocate enough page directories to cover the PTEs to be used */
885                 if (vma->vm->allocate_va_range)
886                         i915_vm_alloc_pt_stash(vma->vm,
887                                                &work->stash,
888                                                vma->size);
889         }
890
891         if (flags & PIN_GLOBAL)
892                 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
893
894         /*
895          * Differentiate between user/kernel vma inside the aliasing-ppgtt.
896          *
897          * We conflate the Global GTT with the user's vma when using the
898          * aliasing-ppgtt, but it is still vitally important to try and
899          * keep the use cases distinct. For example, userptr objects are
900          * not allowed inside the Global GTT as that will cause lock
901          * inversions when we have to evict them via the mmu_notifier callbacks -
902          * but they are allowed to be part of the user ppGTT which can never
903          * be mapped. As such we try to give the distinct users of the same
904          * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
905          * and i915_ppgtt separate].
906          *
907          * NB this may cause us to mask real lock inversions -- while the
908          * code is safe today, lockdep may not be able to spot future
909          * transgressions.
910          */
911         err = mutex_lock_interruptible_nested(&vma->vm->mutex,
912                                               !(flags & PIN_GLOBAL));
913         if (err)
914                 goto err_fence;
915
916         /* No more allocations allowed now we hold vm->mutex */
917
918         if (unlikely(i915_vma_is_closed(vma))) {
919                 err = -ENOENT;
920                 goto err_unlock;
921         }
922
923         bound = atomic_read(&vma->flags);
924         if (unlikely(bound & I915_VMA_ERROR)) {
925                 err = -ENOMEM;
926                 goto err_unlock;
927         }
928
929         if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
930                 err = -EAGAIN; /* pins are meant to be fairly temporary */
931                 goto err_unlock;
932         }
933
934         if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
935                 __i915_vma_pin(vma);
936                 goto err_unlock;
937         }
938
939         err = i915_active_acquire(&vma->active);
940         if (err)
941                 goto err_unlock;
942
943         if (!(bound & I915_VMA_BIND_MASK)) {
944                 err = i915_vma_insert(vma, size, alignment, flags);
945                 if (err)
946                         goto err_active;
947
948                 if (i915_is_ggtt(vma->vm))
949                         __i915_vma_set_map_and_fenceable(vma);
950         }
951
952         GEM_BUG_ON(!vma->pages);
953         err = i915_vma_bind(vma,
954                             vma->obj ? vma->obj->cache_level : 0,
955                             flags, work);
956         if (err)
957                 goto err_remove;
958
959         /* There should only be at most 2 active bindings (user, global) */
960         GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
961         atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
962         list_move_tail(&vma->vm_link, &vma->vm->bound_list);
963
964         __i915_vma_pin(vma);
965         GEM_BUG_ON(!i915_vma_is_pinned(vma));
966         GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
967         GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
968
969 err_remove:
970         if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
971                 i915_vma_detach(vma);
972                 drm_mm_remove_node(&vma->node);
973         }
974 err_active:
975         i915_active_release(&vma->active);
976 err_unlock:
977         mutex_unlock(&vma->vm->mutex);
978 err_fence:
979         if (work)
980                 dma_fence_work_commit_imm(&work->base);
981         if (wakeref)
982                 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
983 err_pages:
984         vma_put_pages(vma);
985         return err;
986 }
987
988 static void flush_idle_contexts(struct intel_gt *gt)
989 {
990         struct intel_engine_cs *engine;
991         enum intel_engine_id id;
992
993         for_each_engine(engine, gt, id)
994                 intel_engine_flush_barriers(engine);
995
996         intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
997 }
998
999 int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
1000 {
1001         struct i915_address_space *vm = vma->vm;
1002         int err;
1003
1004         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
1005
1006         do {
1007                 err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
1008                 if (err != -ENOSPC) {
1009                         if (!err) {
1010                                 err = i915_vma_wait_for_bind(vma);
1011                                 if (err)
1012                                         i915_vma_unpin(vma);
1013                         }
1014                         return err;
1015                 }
1016
1017                 /* Unlike i915_vma_pin, we don't take no for an answer! */
1018                 flush_idle_contexts(vm->gt);
1019                 if (mutex_lock_interruptible(&vm->mutex) == 0) {
1020                         i915_gem_evict_vm(vm);
1021                         mutex_unlock(&vm->mutex);
1022                 }
1023         } while (1);
1024 }
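
/*
 * Unlike a plain i915_vma_pin(), the loop above refuses to give up on
 * -ENOSPC: it flushes the idle contexts and barriers on every engine and
 * then evicts the entire GGTT before retrying, so it only returns early on
 * success (after waiting for the bind to complete) or on another error.
 */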
1025
1026 static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
1027 {
1028         /*
1029          * We defer actually closing, unbinding and destroying the VMA until
1030          * the next idle point, or if the object is freed in the meantime. By
1031          * postponing the unbind, we allow for it to be resurrected by the
1032          * client, avoiding the work required to rebind the VMA. This is
1033          * advantageous for DRI, where the client/server pass objects
1034          * between themselves, temporarily opening a local VMA to the
1035          * object, and then closing it again. The same object is then reused
1036          * on the next frame (or two, depending on the depth of the swap queue)
1037          * causing us to rebind the VMA once more. This ends up being a lot
1038          * of wasted work for the steady state.
1039          */
1040         GEM_BUG_ON(i915_vma_is_closed(vma));
1041         list_add(&vma->closed_link, &gt->closed_vma);
1042 }
1043
1044 void i915_vma_close(struct i915_vma *vma)
1045 {
1046         struct intel_gt *gt = vma->vm->gt;
1047         unsigned long flags;
1048
1049         if (i915_vma_is_ggtt(vma))
1050                 return;
1051
1052         GEM_BUG_ON(!atomic_read(&vma->open_count));
1053         if (atomic_dec_and_lock_irqsave(&vma->open_count,
1054                                         &gt->closed_lock,
1055                                         flags)) {
1056                 __vma_close(vma, gt);
1057                 spin_unlock_irqrestore(&gt->closed_lock, flags);
1058         }
1059 }
1060
1061 static void __i915_vma_remove_closed(struct i915_vma *vma)
1062 {
1063         struct intel_gt *gt = vma->vm->gt;
1064
1065         spin_lock_irq(&gt->closed_lock);
1066         list_del_init(&vma->closed_link);
1067         spin_unlock_irq(&gt->closed_lock);
1068 }
1069
1070 void i915_vma_reopen(struct i915_vma *vma)
1071 {
1072         if (i915_vma_is_closed(vma))
1073                 __i915_vma_remove_closed(vma);
1074 }
1075
1076 void i915_vma_release(struct kref *ref)
1077 {
1078         struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
1079
1080         if (drm_mm_node_allocated(&vma->node)) {
1081                 mutex_lock(&vma->vm->mutex);
1082                 atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1083                 WARN_ON(__i915_vma_unbind(vma));
1084                 mutex_unlock(&vma->vm->mutex);
1085                 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1086         }
1087         GEM_BUG_ON(i915_vma_is_active(vma));
1088
1089         if (vma->obj) {
1090                 struct drm_i915_gem_object *obj = vma->obj;
1091
1092                 spin_lock(&obj->vma.lock);
1093                 list_del(&vma->obj_link);
1094                 if (!RB_EMPTY_NODE(&vma->obj_node))
1095                         rb_erase(&vma->obj_node, &obj->vma.tree);
1096                 spin_unlock(&obj->vma.lock);
1097         }
1098
1099         __i915_vma_remove_closed(vma);
1100         i915_vm_put(vma->vm);
1101
1102         i915_active_fini(&vma->active);
1103         i915_vma_free(vma);
1104 }
1105
1106 void i915_vma_parked(struct intel_gt *gt)
1107 {
1108         struct i915_vma *vma, *next;
1109         LIST_HEAD(closed);
1110
1111         spin_lock_irq(&gt->closed_lock);
1112         list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1113                 struct drm_i915_gem_object *obj = vma->obj;
1114                 struct i915_address_space *vm = vma->vm;
1115
1116                 /* XXX All to avoid keeping a reference on i915_vma itself */
1117
1118                 if (!kref_get_unless_zero(&obj->base.refcount))
1119                         continue;
1120
1121                 if (!i915_vm_tryopen(vm)) {
1122                         i915_gem_object_put(obj);
1123                         continue;
1124                 }
1125
1126                 list_move(&vma->closed_link, &closed);
1127         }
1128         spin_unlock_irq(&gt->closed_lock);
1129
1130         /* As the GT is held idle, no vma can be reopened as we destroy them */
1131         list_for_each_entry_safe(vma, next, &closed, closed_link) {
1132                 struct drm_i915_gem_object *obj = vma->obj;
1133                 struct i915_address_space *vm = vma->vm;
1134
1135                 INIT_LIST_HEAD(&vma->closed_link);
1136                 __i915_vma_put(vma);
1137
1138                 i915_gem_object_put(obj);
1139                 i915_vm_close(vm);
1140         }
1141 }
1142
1143 static void __i915_vma_iounmap(struct i915_vma *vma)
1144 {
1145         GEM_BUG_ON(i915_vma_is_pinned(vma));
1146
1147         if (vma->iomap == NULL)
1148                 return;
1149
1150         io_mapping_unmap(vma->iomap);
1151         vma->iomap = NULL;
1152 }
1153
1154 void i915_vma_revoke_mmap(struct i915_vma *vma)
1155 {
1156         struct drm_vma_offset_node *node;
1157         u64 vma_offset;
1158
1159         if (!i915_vma_has_userfault(vma))
1160                 return;
1161
1162         GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1163         GEM_BUG_ON(!vma->obj->userfault_count);
1164
1165         node = &vma->mmo->vma_node;
1166         vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
1167         unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1168                             drm_vma_node_offset_addr(node) + vma_offset,
1169                             vma->size,
1170                             1);
1171
1172         i915_vma_unset_userfault(vma);
1173         if (!--vma->obj->userfault_count)
1174                 list_del(&vma->obj->userfault_link);
1175 }
1176
1177 static int
1178 __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
1179 {
1180         return __i915_request_await_exclusive(rq, &vma->active);
1181 }
1182
1183 int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1184 {
1185         int err;
1186
1187         GEM_BUG_ON(!i915_vma_is_pinned(vma));
1188
1189         /* Wait for the vma to be bound before we start! */
1190         err = __i915_request_await_bind(rq, vma);
1191         if (err)
1192                 return err;
1193
1194         return i915_active_add_request(&vma->active, rq);
1195 }
1196
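/*
 * i915_vma_move_to_active() ties a request to this vma: it first awaits the
 * asynchronous bind through the vma's exclusive fence (see above), adds the
 * request to the vma's active tracker, and then records the request in the
 * object's reservation object - as the exclusive fence for writes, or as a
 * shared fence otherwise - updating the GEM domains to match.
 */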
1197 int i915_vma_move_to_active(struct i915_vma *vma,
1198                             struct i915_request *rq,
1199                             unsigned int flags)
1200 {
1201         struct drm_i915_gem_object *obj = vma->obj;
1202         int err;
1203
1204         assert_object_held(obj);
1205
1206         err = __i915_vma_move_to_active(vma, rq);
1207         if (unlikely(err))
1208                 return err;
1209
1210         if (flags & EXEC_OBJECT_WRITE) {
1211                 struct intel_frontbuffer *front;
1212
1213                 front = __intel_frontbuffer_get(obj);
1214                 if (unlikely(front)) {
1215                         if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
1216                                 i915_active_add_request(&front->write, rq);
1217                         intel_frontbuffer_put(front);
1218                 }
1219
1220                 dma_resv_add_excl_fence(vma->resv, &rq->fence);
1221                 obj->write_domain = I915_GEM_DOMAIN_RENDER;
1222                 obj->read_domains = 0;
1223         } else {
1224                 err = dma_resv_reserve_shared(vma->resv, 1);
1225                 if (unlikely(err))
1226                         return err;
1227
1228                 dma_resv_add_shared_fence(vma->resv, &rq->fence);
1229                 obj->write_domain = 0;
1230         }
1231
1232         if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
1233                 i915_active_add_request(&vma->fence->active, rq);
1234
1235         obj->read_domains |= I915_GEM_GPU_DOMAINS;
1236         obj->mm.dirty = true;
1237
1238         GEM_BUG_ON(!i915_vma_is_active(vma));
1239         return 0;
1240 }
1241
1242 void __i915_vma_evict(struct i915_vma *vma)
1243 {
1244         GEM_BUG_ON(i915_vma_is_pinned(vma));
1245
1246         if (i915_vma_is_map_and_fenceable(vma)) {
1247                 /* Force a pagefault for domain tracking on next user access */
1248                 i915_vma_revoke_mmap(vma);
1249
1250                 /*
1251                  * Check that we have flushed all writes through the GGTT
1252                  * before the unbind, otherwise due to the non-strict nature of those
1253                  * indirect writes they may end up referencing the GGTT PTE
1254                  * after the unbind.
1255                  *
1256                  * Note that we may be concurrently poking at the GGTT_WRITE
1257                  * bit from set-domain, as we mark all GGTT vma associated
1258                  * with an object. We know this is for another vma, as we
1259                  * are currently unbinding this one -- so if this vma will be
1260                  * reused, it will be refaulted and have its dirty bit set
1261                  * before the next write.
1262                  */
1263                 i915_vma_flush_writes(vma);
1264
1265                 /* release the fence reg _after_ flushing */
1266                 i915_vma_revoke_fence(vma);
1267
1268                 __i915_vma_iounmap(vma);
1269                 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
1270         }
1271         GEM_BUG_ON(vma->fence);
1272         GEM_BUG_ON(i915_vma_has_userfault(vma));
1273
1274         if (likely(atomic_read(&vma->vm->open))) {
1275                 trace_i915_vma_unbind(vma);
1276                 vma->ops->unbind_vma(vma->vm, vma);
1277         }
1278         atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
1279                    &vma->flags);
1280
1281         i915_vma_detach(vma);
1282         vma_unbind_pages(vma);
1283 }
1284
1285 int __i915_vma_unbind(struct i915_vma *vma)
1286 {
1287         int ret;
1288
1289         lockdep_assert_held(&vma->vm->mutex);
1290
1291         if (!drm_mm_node_allocated(&vma->node))
1292                 return 0;
1293
1294         if (i915_vma_is_pinned(vma)) {
1295                 vma_print_allocator(vma, "is pinned");
1296                 return -EAGAIN;
1297         }
1298
1299         /*
1300          * After confirming that no one else is pinning this vma, wait for
1301          * any laggards who may have crept in during the wait (through
1302          * a residual pin skipping the vm->mutex) to complete.
1303          */
1304         ret = i915_vma_sync(vma);
1305         if (ret)
1306                 return ret;
1307
1308         GEM_BUG_ON(i915_vma_is_active(vma));
1309         __i915_vma_evict(vma);
1310
1311         drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
1312         return 0;
1313 }
1314
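/*
 * i915_vma_unbind() is the locked counterpart of __i915_vma_unbind(): it
 * waits for any pending async work, grabs a runtime-pm wakeref when the
 * global binding may require the GGTT to be woken, and then takes vm->mutex
 * around the actual unbind.
 */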
1315 int i915_vma_unbind(struct i915_vma *vma)
1316 {
1317         struct i915_address_space *vm = vma->vm;
1318         intel_wakeref_t wakeref = 0;
1319         int err;
1320
1321         /* Optimistic wait before taking the mutex */
1322         err = i915_vma_sync(vma);
1323         if (err)
1324                 return err;
1325
1326         if (!drm_mm_node_allocated(&vma->node))
1327                 return 0;
1328
1329         if (i915_vma_is_pinned(vma)) {
1330                 vma_print_allocator(vma, "is pinned");
1331                 return -EAGAIN;
1332         }
1333
1334         if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
1335                 /* XXX not always required: nop_clear_range */
1336                 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
1337
1338         err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
1339         if (err)
1340                 goto out_rpm;
1341
1342         err = __i915_vma_unbind(vma);
1343         mutex_unlock(&vm->mutex);
1344
1345 out_rpm:
1346         if (wakeref)
1347                 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
1348         return err;
1349 }
1350
1351 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
1352 {
1353         i915_gem_object_make_unshrinkable(vma->obj);
1354         return vma;
1355 }
1356
1357 void i915_vma_make_shrinkable(struct i915_vma *vma)
1358 {
1359         i915_gem_object_make_shrinkable(vma->obj);
1360 }
1361
1362 void i915_vma_make_purgeable(struct i915_vma *vma)
1363 {
1364         i915_gem_object_make_purgeable(vma->obj);
1365 }
1366
1367 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1368 #include "selftests/i915_vma.c"
1369 #endif
1370
1371 static void i915_global_vma_shrink(void)
1372 {
1373         kmem_cache_shrink(global.slab_vmas);
1374 }
1375
1376 static void i915_global_vma_exit(void)
1377 {
1378         kmem_cache_destroy(global.slab_vmas);
1379 }
1380
1381 static struct i915_global_vma global = { {
1382         .shrink = i915_global_vma_shrink,
1383         .exit = i915_global_vma_exit,
1384 } };
1385
1386 int __init i915_global_vma_init(void)
1387 {
1388         global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
1389         if (!global.slab_vmas)
1390                 return -ENOMEM;
1391
1392         i915_global_register(&global.base);
1393         return 0;
1394 }