drivers/gpu/drm/i915/i915_vma.c
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/sched/mm.h>
26 #include <drm/drm_gem.h>
27
28 #include "display/intel_frontbuffer.h"
29
30 #include "gt/intel_engine.h"
31 #include "gt/intel_engine_heartbeat.h"
32 #include "gt/intel_gt.h"
33 #include "gt/intel_gt_requests.h"
34
35 #include "i915_drv.h"
36 #include "i915_globals.h"
37 #include "i915_sw_fence_work.h"
38 #include "i915_trace.h"
39 #include "i915_vma.h"
40
41 static struct i915_global_vma {
42         struct i915_global base;
43         struct kmem_cache *slab_vmas;
44 } global;
45
46 struct i915_vma *i915_vma_alloc(void)
47 {
48         return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
49 }
50
51 void i915_vma_free(struct i915_vma *vma)
52 {
53         return kmem_cache_free(global.slab_vmas, vma);
54 }
55
56 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
57
58 #include <linux/stackdepot.h>
59
60 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
61 {
62         unsigned long *entries;
63         unsigned int nr_entries;
64         char buf[512];
65
66         if (!vma->node.stack) {
67                 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
68                                  vma->node.start, vma->node.size, reason);
69                 return;
70         }
71
72         nr_entries = stack_depot_fetch(vma->node.stack, &entries);
73         stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
74         DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
75                          vma->node.start, vma->node.size, reason, buf);
76 }
77
78 #else
79
80 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
81 {
82 }
83
84 #endif
85
86 static inline struct i915_vma *active_to_vma(struct i915_active *ref)
87 {
88         return container_of(ref, typeof(struct i915_vma), active);
89 }
90
91 static int __i915_vma_active(struct i915_active *ref)
92 {
93         return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
94 }
95
96 __i915_active_call
97 static void __i915_vma_retire(struct i915_active *ref)
98 {
99         i915_vma_put(active_to_vma(ref));
100 }
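/*
 * The two callbacks above tie activity tracking to the vma's lifetime:
 * __i915_vma_active() takes a reference when the vma first becomes active
 * (reporting -ENOENT if the vma is already being destroyed), and
 * __i915_vma_retire() drops that reference once the last activity retires.
 */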
101
102 static struct i915_vma *
103 vma_create(struct drm_i915_gem_object *obj,
104            struct i915_address_space *vm,
105            const struct i915_ggtt_view *view)
106 {
107         struct i915_vma *pos = ERR_PTR(-E2BIG);
108         struct i915_vma *vma;
109         struct rb_node *rb, **p;
110
111         /* The aliasing_ppgtt should never be used directly! */
112         GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
113
114         vma = i915_vma_alloc();
115         if (vma == NULL)
116                 return ERR_PTR(-ENOMEM);
117
118         kref_init(&vma->ref);
119         mutex_init(&vma->pages_mutex);
120         vma->vm = i915_vm_get(vm);
121         vma->ops = &vm->vma_ops;
122         vma->obj = obj;
123         vma->resv = obj->base.resv;
124         vma->size = obj->base.size;
125         vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
126
127         i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
128
129         /* Declare ourselves safe for use inside shrinkers */
130         if (IS_ENABLED(CONFIG_LOCKDEP)) {
131                 fs_reclaim_acquire(GFP_KERNEL);
132                 might_lock(&vma->active.mutex);
133                 fs_reclaim_release(GFP_KERNEL);
134         }
135
136         INIT_LIST_HEAD(&vma->closed_link);
137
138         if (view && view->type != I915_GGTT_VIEW_NORMAL) {
139                 vma->ggtt_view = *view;
140                 if (view->type == I915_GGTT_VIEW_PARTIAL) {
141                         GEM_BUG_ON(range_overflows_t(u64,
142                                                      view->partial.offset,
143                                                      view->partial.size,
144                                                      obj->base.size >> PAGE_SHIFT));
145                         vma->size = view->partial.size;
146                         vma->size <<= PAGE_SHIFT;
147                         GEM_BUG_ON(vma->size > obj->base.size);
148                 } else if (view->type == I915_GGTT_VIEW_ROTATED) {
149                         vma->size = intel_rotation_info_size(&view->rotated);
150                         vma->size <<= PAGE_SHIFT;
151                 } else if (view->type == I915_GGTT_VIEW_REMAPPED) {
152                         vma->size = intel_remapped_info_size(&view->remapped);
153                         vma->size <<= PAGE_SHIFT;
154                 }
155         }
156
157         if (unlikely(vma->size > vm->total))
158                 goto err_vma;
159
160         GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
161
162         spin_lock(&obj->vma.lock);
163
164         if (i915_is_ggtt(vm)) {
165                 if (unlikely(overflows_type(vma->size, u32)))
166                         goto err_unlock;
167
168                 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
169                                                       i915_gem_object_get_tiling(obj),
170                                                       i915_gem_object_get_stride(obj));
171                 if (unlikely(vma->fence_size < vma->size || /* overflow */
172                              vma->fence_size > vm->total))
173                         goto err_unlock;
174
175                 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
176
177                 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
178                                                                 i915_gem_object_get_tiling(obj),
179                                                                 i915_gem_object_get_stride(obj));
180                 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
181
182                 __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
183         }
184
185         rb = NULL;
186         p = &obj->vma.tree.rb_node;
187         while (*p) {
188                 long cmp;
189
190                 rb = *p;
191                 pos = rb_entry(rb, struct i915_vma, obj_node);
192
193                 /*
194                  * If the view already exists in the tree, another thread
195                  * already created a matching vma, so return the older instance
196                  * and dispose of ours.
197                  */
198                 cmp = i915_vma_compare(pos, vm, view);
199                 if (cmp < 0)
200                         p = &rb->rb_right;
201                 else if (cmp > 0)
202                         p = &rb->rb_left;
203                 else
204                         goto err_unlock;
205         }
206         rb_link_node(&vma->obj_node, rb, p);
207         rb_insert_color(&vma->obj_node, &obj->vma.tree);
208
209         if (i915_vma_is_ggtt(vma))
210                 /*
211                  * We put the GGTT vma at the start of the vma-list, followed
212                  * by the ppGGTT vma. This allows us to break early when
213                  * iterating over only the GGTT vma for an object, see
214                  * for_each_ggtt_vma()
215                  */
216                 list_add(&vma->obj_link, &obj->vma.list);
217         else
218                 list_add_tail(&vma->obj_link, &obj->vma.list);
219
220         spin_unlock(&obj->vma.lock);
221
222         return vma;
223
224 err_unlock:
225         spin_unlock(&obj->vma.lock);
226 err_vma:
227         i915_vm_put(vm);
228         i915_vma_free(vma);
229         return pos;
230 }
231
232 static struct i915_vma *
233 vma_lookup(struct drm_i915_gem_object *obj,
234            struct i915_address_space *vm,
235            const struct i915_ggtt_view *view)
236 {
237         struct rb_node *rb;
238
239         rb = obj->vma.tree.rb_node;
240         while (rb) {
241                 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
242                 long cmp;
243
244                 cmp = i915_vma_compare(vma, vm, view);
245                 if (cmp == 0)
246                         return vma;
247
248                 if (cmp < 0)
249                         rb = rb->rb_right;
250                 else
251                         rb = rb->rb_left;
252         }
253
254         return NULL;
255 }
256
257 /**
258  * i915_vma_instance - return the singleton instance of the VMA
259  * @obj: parent &struct drm_i915_gem_object to be mapped
260  * @vm: address space in which the mapping is located
261  * @view: additional mapping requirements
262  *
263  * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
264  * the same @view characteristics. If a match is not found, one is created.
265  * Once created, the VMA is kept until either the object is freed, or the
266  * address space is closed.
267  *
268  * Returns the vma, or an error pointer.
269  */
270 struct i915_vma *
271 i915_vma_instance(struct drm_i915_gem_object *obj,
272                   struct i915_address_space *vm,
273                   const struct i915_ggtt_view *view)
274 {
275         struct i915_vma *vma;
276
277         GEM_BUG_ON(view && !i915_is_ggtt(vm));
278         GEM_BUG_ON(!atomic_read(&vm->open));
279
280         spin_lock(&obj->vma.lock);
281         vma = vma_lookup(obj, vm, view);
282         spin_unlock(&obj->vma.lock);
283
284         /* vma_create() will resolve the race if another creates the vma */
285         if (unlikely(!vma))
286                 vma = vma_create(obj, vm, view);
287
288         GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
289         return vma;
290 }
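/*
 * Illustrative use (a sketch, not a call site from this file): callers
 * typically look up the vma for an object in a given address space and
 * then pin it, e.g.
 *
 *	vma = i915_vma_instance(obj, vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 *
 * The lookup itself runs under obj->vma.lock; if no vma exists yet,
 * vma_create() resolves any creation race by returning whichever instance
 * won the race to be inserted into obj->vma.tree.
 */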
291
292 struct i915_vma_work {
293         struct dma_fence_work base;
294         struct i915_vma *vma;
295         struct drm_i915_gem_object *pinned;
296         struct i915_sw_dma_fence_cb cb;
297         enum i915_cache_level cache_level;
298         unsigned int flags;
299 };
300
301 static int __vma_bind(struct dma_fence_work *work)
302 {
303         struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
304         struct i915_vma *vma = vw->vma;
305         int err;
306
307         err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
308         if (err)
309                 atomic_or(I915_VMA_ERROR, &vma->flags);
310
311         return err;
312 }
313
314 static void __vma_release(struct dma_fence_work *work)
315 {
316         struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
317
318         if (vw->pinned)
319                 __i915_gem_object_unpin_pages(vw->pinned);
320 }
321
322 static const struct dma_fence_work_ops bind_ops = {
323         .name = "bind",
324         .work = __vma_bind,
325         .release = __vma_release,
326 };
327
328 struct i915_vma_work *i915_vma_work(void)
329 {
330         struct i915_vma_work *vw;
331
332         vw = kzalloc(sizeof(*vw), GFP_KERNEL);
333         if (!vw)
334                 return NULL;
335
336         dma_fence_work_init(&vw->base, &bind_ops);
337         vw->base.dma.error = -EAGAIN; /* disable the worker by default */
338
339         return vw;
340 }
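/*
 * The freshly allocated work starts with dma.error set to -EAGAIN, i.e.
 * disabled; i915_vma_bind() clears the error to 0 only once the work has
 * actually been populated. The caller commits the fence work in either
 * case (see dma_fence_work_commit_imm() in i915_vma_pin()).
 */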
341
342 int i915_vma_wait_for_bind(struct i915_vma *vma)
343 {
344         int err = 0;
345
346         if (rcu_access_pointer(vma->active.excl.fence)) {
347                 struct dma_fence *fence;
348
349                 rcu_read_lock();
350                 fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
351                 rcu_read_unlock();
352                 if (fence) {
353                         err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
354                         dma_fence_put(fence);
355                 }
356         }
357
358         return err;
359 }
360
361 /**
362  * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
363  * @vma: VMA to map
364  * @cache_level: mapping cache level
365  * @flags: flags like global or local mapping
366  * @work: preallocated worker for allocating and binding the PTE
367  *
368  * DMA addresses are taken from the scatter-gather table of this object (or of
369  * this VMA in case of non-default GGTT views) and PTE entries set up.
370  * Note that DMA addresses are also the only part of the SG table we care about.
371  */
372 int i915_vma_bind(struct i915_vma *vma,
373                   enum i915_cache_level cache_level,
374                   u32 flags,
375                   struct i915_vma_work *work)
376 {
377         u32 bind_flags;
378         u32 vma_flags;
379         int ret;
380
381         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
382         GEM_BUG_ON(vma->size > vma->node.size);
383
384         if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
385                                               vma->node.size,
386                                               vma->vm->total)))
387                 return -ENODEV;
388
389         if (GEM_DEBUG_WARN_ON(!flags))
390                 return -EINVAL;
391
392         bind_flags = flags;
393         bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
394
395         vma_flags = atomic_read(&vma->flags);
396         vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
397         if (flags & PIN_UPDATE)
398                 bind_flags |= vma_flags;
399         else
400                 bind_flags &= ~vma_flags;
401         if (bind_flags == 0)
402                 return 0;
403
404         GEM_BUG_ON(!vma->pages);
405
406         trace_i915_vma_bind(vma, bind_flags);
407         if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
408                 struct dma_fence *prev;
409
410                 work->vma = vma;
411                 work->cache_level = cache_level;
412                 work->flags = bind_flags | I915_VMA_ALLOC;
413
414                 /*
415                  * Note we only want to chain up to the migration fence on
416                  * the pages (not the object itself). As we don't track that yet,
417                  * we have to use the exclusive fence instead.
418                  *
419                  * Also note that we do not want to track the async vma as
420                  * part of the obj->resv->excl_fence as it only affects
421                  * execution and not the content or the object's backing store lifetime.
422                  */
423                 prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
424                 if (prev) {
425                         __i915_sw_fence_await_dma_fence(&work->base.chain,
426                                                         prev,
427                                                         &work->cb);
428                         dma_fence_put(prev);
429                 }
430
431                 work->base.dma.error = 0; /* enable the queue_work() */
432
433                 if (vma->obj) {
434                         __i915_gem_object_pin_pages(vma->obj);
435                         work->pinned = vma->obj;
436                 }
437         } else {
438                 ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
439                 if (ret)
440                         return ret;
441         }
442
443         atomic_or(bind_flags, &vma->flags);
444         return 0;
445 }
446
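/*
 * i915_vma_pin_iomap() maps the vma's GGTT node through the mappable
 * aperture as write-combined memory, pinning the vma and acquiring a fence
 * for it. The mapping is cached in vma->iomap and must be released with
 * i915_vma_unpin_iomap(). Access through the returned pointer requires the
 * device to be awake.
 */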
447 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
448 {
449         void __iomem *ptr;
450         int err;
451
452         if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
453                 err = -ENODEV;
454                 goto err;
455         }
456
457         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
458         GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
459
460         ptr = READ_ONCE(vma->iomap);
461         if (ptr == NULL) {
462                 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
463                                         vma->node.start,
464                                         vma->node.size);
465                 if (ptr == NULL) {
466                         err = -ENOMEM;
467                         goto err;
468                 }
469
470                 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
471                         io_mapping_unmap(ptr);
472                         ptr = vma->iomap;
473                 }
474         }
475
476         __i915_vma_pin(vma);
477
478         err = i915_vma_pin_fence(vma);
479         if (err)
480                 goto err_unpin;
481
482         i915_vma_set_ggtt_write(vma);
483
484         /* NB Access through the GTT requires the device to be awake. */
485         return ptr;
486
487 err_unpin:
488         __i915_vma_unpin(vma);
489 err:
490         return IO_ERR_PTR(err);
491 }
492
493 void i915_vma_flush_writes(struct i915_vma *vma)
494 {
495         if (i915_vma_unset_ggtt_write(vma))
496                 intel_gt_flush_ggtt_writes(vma->vm->gt);
497 }
498
499 void i915_vma_unpin_iomap(struct i915_vma *vma)
500 {
501         GEM_BUG_ON(vma->iomap == NULL);
502
503         i915_vma_flush_writes(vma);
504
505         i915_vma_unpin_fence(vma);
506         i915_vma_unpin(vma);
507 }
508
509 void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
510 {
511         struct i915_vma *vma;
512         struct drm_i915_gem_object *obj;
513
514         vma = fetch_and_zero(p_vma);
515         if (!vma)
516                 return;
517
518         obj = vma->obj;
519         GEM_BUG_ON(!obj);
520
521         i915_vma_unpin(vma);
522
523         if (flags & I915_VMA_RELEASE_MAP)
524                 i915_gem_object_unpin_map(obj);
525
526         i915_gem_object_put(obj);
527 }
528
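/*
 * i915_vma_misplaced() reports whether the vma's current node fails to
 * satisfy a new pin request: too small, misaligned, below a requested
 * offset bias, not at the requested fixed offset, not map-and-fenceable
 * when PIN_MAPPABLE was asked for, or marked with a bind error. An
 * unallocated node is never considered misplaced.
 */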
529 bool i915_vma_misplaced(const struct i915_vma *vma,
530                         u64 size, u64 alignment, u64 flags)
531 {
532         if (!drm_mm_node_allocated(&vma->node))
533                 return false;
534
535         if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
536                 return true;
537
538         if (vma->node.size < size)
539                 return true;
540
541         GEM_BUG_ON(alignment && !is_power_of_2(alignment));
542         if (alignment && !IS_ALIGNED(vma->node.start, alignment))
543                 return true;
544
545         if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
546                 return true;
547
548         if (flags & PIN_OFFSET_BIAS &&
549             vma->node.start < (flags & PIN_OFFSET_MASK))
550                 return true;
551
552         if (flags & PIN_OFFSET_FIXED &&
553             vma->node.start != (flags & PIN_OFFSET_MASK))
554                 return true;
555
556         return false;
557 }
558
559 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
560 {
561         bool mappable, fenceable;
562
563         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
564         GEM_BUG_ON(!vma->fence_size);
565
566         fenceable = (vma->node.size >= vma->fence_size &&
567                      IS_ALIGNED(vma->node.start, vma->fence_alignment));
568
569         mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
570
571         if (mappable && fenceable)
572                 set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
573         else
574                 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
575 }
576
577 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
578 {
579         struct drm_mm_node *node = &vma->node;
580         struct drm_mm_node *other;
581
582         /*
583          * On some machines we have to be careful when putting differing types
584          * of snoopable memory together to avoid the prefetcher crossing memory
585          * domains and dying. During vm initialisation, we decide whether or not
586          * these constraints apply and set the drm_mm.color_adjust
587          * appropriately.
588          */
589         if (!i915_vm_has_cache_coloring(vma->vm))
590                 return true;
591
592         /* Only valid to be called on an already inserted vma */
593         GEM_BUG_ON(!drm_mm_node_allocated(node));
594         GEM_BUG_ON(list_empty(&node->node_list));
595
596         other = list_prev_entry(node, node_list);
597         if (i915_node_color_differs(other, color) &&
598             !drm_mm_hole_follows(other))
599                 return false;
600
601         other = list_next_entry(node, node_list);
602         if (i915_node_color_differs(other, color) &&
603             !drm_mm_hole_follows(node))
604                 return false;
605
606         return true;
607 }
608
609 /**
610  * i915_vma_insert - finds a slot for the vma in its address space
611  * @vma: the vma
612  * @size: requested size in bytes (can be larger than the VMA)
613  * @alignment: required alignment
614  * @flags: mask of PIN_* flags to use
615  *
616  * First we try to allocate some free space that meets the requirements for
617  * the VMA. Failing that, if the flags permit, we evict an old VMA,
618  * preferably the oldest idle entry, to make room for the new VMA.
619  *
620  * Returns:
621  * 0 on success, negative error code otherwise.
622  */
623 static int
624 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
625 {
626         unsigned long color;
627         u64 start, end;
628         int ret;
629
630         GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
631         GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
632
633         size = max(size, vma->size);
634         alignment = max(alignment, vma->display_alignment);
635         if (flags & PIN_MAPPABLE) {
636                 size = max_t(typeof(size), size, vma->fence_size);
637                 alignment = max_t(typeof(alignment),
638                                   alignment, vma->fence_alignment);
639         }
640
641         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
642         GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
643         GEM_BUG_ON(!is_power_of_2(alignment));
644
645         start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
646         GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
647
648         end = vma->vm->total;
649         if (flags & PIN_MAPPABLE)
650                 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
651         if (flags & PIN_ZONE_4G)
652                 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
653         GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
654
655         /* If binding the object/GGTT view requires more space than the entire
656          * aperture has, reject it early before evicting everything in a vain
657          * attempt to find space.
658          */
659         if (size > end) {
660                 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
661                           size, flags & PIN_MAPPABLE ? "mappable" : "total",
662                           end);
663                 return -ENOSPC;
664         }
665
666         color = 0;
667         if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
668                 color = vma->obj->cache_level;
669
670         if (flags & PIN_OFFSET_FIXED) {
671                 u64 offset = flags & PIN_OFFSET_MASK;
672                 if (!IS_ALIGNED(offset, alignment) ||
673                     range_overflows(offset, size, end))
674                         return -EINVAL;
675
676                 ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
677                                            size, offset, color,
678                                            flags);
679                 if (ret)
680                         return ret;
681         } else {
682                 /*
683                  * We only support huge gtt pages through the 48b PPGTT,
684                  * however we also don't want to force any alignment for
685                  * objects which need to be tightly packed into the low 32 bits.
686                  *
687                  * Note that we assume that the GGTT is limited to 4GiB for the
688                  * foreseeable future. See also i915_ggtt_offset().
689                  */
690                 if (upper_32_bits(end - 1) &&
691                     vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
692                         /*
693                          * We can't mix 64K and 4K PTEs in the same page-table
694                          * (2M block), and so to avoid the ugliness and
695                          * complexity of coloring we opt for just aligning 64K
696                          * objects to 2M.
697                          */
698                         u64 page_alignment =
699                                 rounddown_pow_of_two(vma->page_sizes.sg |
700                                                      I915_GTT_PAGE_SIZE_2M);
701
702                         /*
703                          * Check we don't expand for the limited Global GTT
704                          * (mappable aperture is even more precious!). This
705                          * also checks that we exclude the aliasing-ppgtt.
706                          */
707                         GEM_BUG_ON(i915_vma_is_ggtt(vma));
708
709                         alignment = max(alignment, page_alignment);
710
711                         if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
712                                 size = round_up(size, I915_GTT_PAGE_SIZE_2M);
713                 }
714
715                 ret = i915_gem_gtt_insert(vma->vm, &vma->node,
716                                           size, alignment, color,
717                                           start, end, flags);
718                 if (ret)
719                         return ret;
720
721                 GEM_BUG_ON(vma->node.start < start);
722                 GEM_BUG_ON(vma->node.start + vma->node.size > end);
723         }
724         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
725         GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
726
727         list_add_tail(&vma->vm_link, &vma->vm->bound_list);
728
729         return 0;
730 }
731
732 static void
733 i915_vma_detach(struct i915_vma *vma)
734 {
735         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
736         GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
737
738         /*
739          * And finally now the object is completely decoupled from this
740          * vma, we can drop its hold on the backing storage and allow
741          * it to be reaped by the shrinker.
742          */
743         list_del(&vma->vm_link);
744 }
745
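/*
 * try_qad_pin() ("qad" presumably being "quick and dirty") attempts a pin
 * without taking vm->mutex: the pin count is only bumped if the vma is
 * already bound with all the requested flags and carries no error or
 * overflow. If the vma is bound but currently unpinned, the check is
 * retried under vm->mutex to avoid racing with a concurrent
 * i915_vma_unbind().
 */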
746 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
747 {
748         unsigned int bound;
749         bool pinned = true;
750
751         bound = atomic_read(&vma->flags);
752         do {
753                 if (unlikely(flags & ~bound))
754                         return false;
755
756                 if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
757                         return false;
758
759                 if (!(bound & I915_VMA_PIN_MASK))
760                         goto unpinned;
761
762                 GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
763         } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
764
765         return true;
766
767 unpinned:
768         /*
769          * If pin_count==0, but we are bound, check under the lock to avoid
770          * racing with a concurrent i915_vma_unbind().
771          */
772         mutex_lock(&vma->vm->mutex);
773         do {
774                 if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
775                         pinned = false;
776                         break;
777                 }
778
779                 if (unlikely(flags & ~bound)) {
780                         pinned = false;
781                         break;
782                 }
783         } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
784         mutex_unlock(&vma->vm->mutex);
785
786         return pinned;
787 }
788
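/*
 * vma->pages_count is split in two: the low bits count references on the
 * backing pages taken via vma_get_pages(), while the bits at and above
 * I915_VMA_PAGES_BIAS count live bindings (see vma_unbind_pages()). The
 * pages themselves are acquired lazily under vma->pages_mutex.
 */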
789 static int vma_get_pages(struct i915_vma *vma)
790 {
791         int err = 0;
792
793         if (atomic_add_unless(&vma->pages_count, 1, 0))
794                 return 0;
795
796         /* Allocations ahoy! */
797         if (mutex_lock_interruptible(&vma->pages_mutex))
798                 return -EINTR;
799
800         if (!atomic_read(&vma->pages_count)) {
801                 if (vma->obj) {
802                         err = i915_gem_object_pin_pages(vma->obj);
803                         if (err)
804                                 goto unlock;
805                 }
806
807                 err = vma->ops->set_pages(vma);
808                 if (err) {
809                         if (vma->obj)
810                                 i915_gem_object_unpin_pages(vma->obj);
811                         goto unlock;
812                 }
813         }
814         atomic_inc(&vma->pages_count);
815
816 unlock:
817         mutex_unlock(&vma->pages_mutex);
818
819         return err;
820 }
821
822 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
823 {
824         /* We allocate under vma_get_pages, so beware the shrinker */
825         mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
826         GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
827         if (atomic_sub_return(count, &vma->pages_count) == 0) {
828                 vma->ops->clear_pages(vma);
829                 GEM_BUG_ON(vma->pages);
830                 if (vma->obj)
831                         i915_gem_object_unpin_pages(vma->obj);
832         }
833         mutex_unlock(&vma->pages_mutex);
834 }
835
836 static void vma_put_pages(struct i915_vma *vma)
837 {
838         if (atomic_add_unless(&vma->pages_count, -1, 1))
839                 return;
840
841         __vma_put_pages(vma, 1);
842 }
843
844 static void vma_unbind_pages(struct i915_vma *vma)
845 {
846         unsigned int count;
847
848         lockdep_assert_held(&vma->vm->mutex);
849
850         /* The upper portion of pages_count is the number of bindings */
851         count = atomic_read(&vma->pages_count);
852         count >>= I915_VMA_PAGES_BIAS;
853         GEM_BUG_ON(!count);
854
855         __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
856 }
857
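/*
 * i915_vma_pin() is the main entry point for binding and pinning a vma:
 * @size and @alignment constrain the placement (0 means "use the vma's own
 * requirements") and @flags must include at least one of PIN_USER (ppGTT
 * binding) or PIN_GLOBAL (GGTT binding). On success the vma is left pinned
 * and must later be released with i915_vma_unpin().
 */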
858 int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
859 {
860         struct i915_vma_work *work = NULL;
861         intel_wakeref_t wakeref = 0;
862         unsigned int bound;
863         int err;
864
865         BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
866         BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
867
868         GEM_BUG_ON(flags & PIN_UPDATE);
869         GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
870
871         /* First try and grab the pin without rebinding the vma */
872         if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
873                 return 0;
874
875         err = vma_get_pages(vma);
876         if (err)
877                 return err;
878
879         if (flags & vma->vm->bind_async_flags) {
880                 work = i915_vma_work();
881                 if (!work) {
882                         err = -ENOMEM;
883                         goto err_pages;
884                 }
885         }
886
887         if (flags & PIN_GLOBAL)
888                 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
889
890         /*
891          * Differentiate between user/kernel vma inside the aliasing-ppgtt.
892          *
893          * We conflate the Global GTT with the user's vma when using the
894          * aliasing-ppgtt, but it is still vitally important to try and
895          * keep the use cases distinct. For example, userptr objects are
896          * not allowed inside the Global GTT as that will cause lock
897          * inversions when we have to evict them from the mmu_notifier callbacks -
898          * but they are allowed to be part of the user ppGTT which can never
899          * be mapped. As such we try to give the distinct users of the same
900          * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
901          * and i915_ppgtt separate].
902          *
903          * NB this may cause us to mask real lock inversions -- while the
904          * code is safe today, lockdep may not be able to spot future
905          * transgressions.
906          */
907         err = mutex_lock_interruptible_nested(&vma->vm->mutex,
908                                               !(flags & PIN_GLOBAL));
909         if (err)
910                 goto err_fence;
911
912         /* No more allocations allowed now we hold vm->mutex */
913
914         if (unlikely(i915_vma_is_closed(vma))) {
915                 err = -ENOENT;
916                 goto err_unlock;
917         }
918
919         bound = atomic_read(&vma->flags);
920         if (unlikely(bound & I915_VMA_ERROR)) {
921                 err = -ENOMEM;
922                 goto err_unlock;
923         }
924
925         if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
926                 err = -EAGAIN; /* pins are meant to be fairly temporary */
927                 goto err_unlock;
928         }
929
930         if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
931                 __i915_vma_pin(vma);
932                 goto err_unlock;
933         }
934
935         err = i915_active_acquire(&vma->active);
936         if (err)
937                 goto err_unlock;
938
939         if (!(bound & I915_VMA_BIND_MASK)) {
940                 err = i915_vma_insert(vma, size, alignment, flags);
941                 if (err)
942                         goto err_active;
943
944                 if (i915_is_ggtt(vma->vm))
945                         __i915_vma_set_map_and_fenceable(vma);
946         }
947
948         GEM_BUG_ON(!vma->pages);
949         err = i915_vma_bind(vma,
950                             vma->obj ? vma->obj->cache_level : 0,
951                             flags, work);
952         if (err)
953                 goto err_remove;
954
955         /* There should only be at most 2 active bindings (user, global) */
956         GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
957         atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
958         list_move_tail(&vma->vm_link, &vma->vm->bound_list);
959
960         __i915_vma_pin(vma);
961         GEM_BUG_ON(!i915_vma_is_pinned(vma));
962         GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
963         GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
964
965 err_remove:
966         if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
967                 i915_vma_detach(vma);
968                 drm_mm_remove_node(&vma->node);
969         }
970 err_active:
971         i915_active_release(&vma->active);
972 err_unlock:
973         mutex_unlock(&vma->vm->mutex);
974 err_fence:
975         if (work)
976                 dma_fence_work_commit_imm(&work->base);
977         if (wakeref)
978                 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
979 err_pages:
980         vma_put_pages(vma);
981         return err;
982 }
983
984 static void flush_idle_contexts(struct intel_gt *gt)
985 {
986         struct intel_engine_cs *engine;
987         enum intel_engine_id id;
988
989         for_each_engine(engine, gt, id)
990                 intel_engine_flush_barriers(engine);
991
992         intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
993 }
994
995 int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
996 {
997         struct i915_address_space *vm = vma->vm;
998         int err;
999
1000         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
1001
1002         do {
1003                 err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
1004                 if (err != -ENOSPC) {
1005                         if (!err) {
1006                                 err = i915_vma_wait_for_bind(vma);
1007                                 if (err)
1008                                         i915_vma_unpin(vma);
1009                         }
1010                         return err;
1011                 }
1012
1013                 /* Unlike i915_vma_pin, we don't take no for an answer! */
1014                 flush_idle_contexts(vm->gt);
1015                 if (mutex_lock_interruptible(&vm->mutex) == 0) {
1016                         i915_gem_evict_vm(vm);
1017                         mutex_unlock(&vm->mutex);
1018                 }
1019         } while (1);
1020 }
1021
1022 static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
1023 {
1024         /*
1025          * We defer actually closing, unbinding and destroying the VMA until
1026          * the next idle point, or if the object is freed in the meantime. By
1027          * postponing the unbind, we allow for it to be resurrected by the
1028          * client, avoiding the work required to rebind the VMA. This is
1029          * advantageous for DRI, where the client/server pass objects
1030          * between themselves, temporarily opening a local VMA to the
1031          * object, and then closing it again. The same object is then reused
1032          * on the next frame (or two, depending on the depth of the swap queue)
1033          * causing us to rebind the VMA once more. This ends up being a lot
1034          * of wasted work for the steady state.
1035          */
1036         GEM_BUG_ON(i915_vma_is_closed(vma));
1037         list_add(&vma->closed_link, &gt->closed_vma);
1038 }
1039
1040 void i915_vma_close(struct i915_vma *vma)
1041 {
1042         struct intel_gt *gt = vma->vm->gt;
1043         unsigned long flags;
1044
1045         if (i915_vma_is_ggtt(vma))
1046                 return;
1047
1048         GEM_BUG_ON(!atomic_read(&vma->open_count));
1049         if (atomic_dec_and_lock_irqsave(&vma->open_count,
1050                                         &gt->closed_lock,
1051                                         flags)) {
1052                 __vma_close(vma, gt);
1053                 spin_unlock_irqrestore(&gt->closed_lock, flags);
1054         }
1055 }
1056
1057 static void __i915_vma_remove_closed(struct i915_vma *vma)
1058 {
1059         struct intel_gt *gt = vma->vm->gt;
1060
1061         spin_lock_irq(&gt->closed_lock);
1062         list_del_init(&vma->closed_link);
1063         spin_unlock_irq(&gt->closed_lock);
1064 }
1065
1066 void i915_vma_reopen(struct i915_vma *vma)
1067 {
1068         if (i915_vma_is_closed(vma))
1069                 __i915_vma_remove_closed(vma);
1070 }
1071
1072 void i915_vma_release(struct kref *ref)
1073 {
1074         struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
1075
1076         if (drm_mm_node_allocated(&vma->node)) {
1077                 mutex_lock(&vma->vm->mutex);
1078                 atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1079                 WARN_ON(__i915_vma_unbind(vma));
1080                 mutex_unlock(&vma->vm->mutex);
1081                 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1082         }
1083         GEM_BUG_ON(i915_vma_is_active(vma));
1084
1085         if (vma->obj) {
1086                 struct drm_i915_gem_object *obj = vma->obj;
1087
1088                 spin_lock(&obj->vma.lock);
1089                 list_del(&vma->obj_link);
1090                 rb_erase(&vma->obj_node, &obj->vma.tree);
1091                 spin_unlock(&obj->vma.lock);
1092         }
1093
1094         __i915_vma_remove_closed(vma);
1095         i915_vm_put(vma->vm);
1096
1097         i915_active_fini(&vma->active);
1098         i915_vma_free(vma);
1099 }
1100
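/*
 * i915_vma_parked() runs once the GT is idle: it walks gt->closed_vma,
 * takes temporary references on each vma's object and address space so
 * that neither disappears underneath us, and then drops the final vma
 * reference, destroying the closed vma outside of the closed_lock.
 */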
1101 void i915_vma_parked(struct intel_gt *gt)
1102 {
1103         struct i915_vma *vma, *next;
1104         LIST_HEAD(closed);
1105
1106         spin_lock_irq(&gt->closed_lock);
1107         list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1108                 struct drm_i915_gem_object *obj = vma->obj;
1109                 struct i915_address_space *vm = vma->vm;
1110
1111                 /* XXX All to avoid keeping a reference on i915_vma itself */
1112
1113                 if (!kref_get_unless_zero(&obj->base.refcount))
1114                         continue;
1115
1116                 if (!i915_vm_tryopen(vm)) {
1117                         i915_gem_object_put(obj);
1118                         continue;
1119                 }
1120
1121                 list_move(&vma->closed_link, &closed);
1122         }
1123         spin_unlock_irq(&gt->closed_lock);
1124
1125         /* As the GT is held idle, no vma can be reopened as we destroy them */
1126         list_for_each_entry_safe(vma, next, &closed, closed_link) {
1127                 struct drm_i915_gem_object *obj = vma->obj;
1128                 struct i915_address_space *vm = vma->vm;
1129
1130                 INIT_LIST_HEAD(&vma->closed_link);
1131                 __i915_vma_put(vma);
1132
1133                 i915_gem_object_put(obj);
1134                 i915_vm_close(vm);
1135         }
1136 }
1137
1138 static void __i915_vma_iounmap(struct i915_vma *vma)
1139 {
1140         GEM_BUG_ON(i915_vma_is_pinned(vma));
1141
1142         if (vma->iomap == NULL)
1143                 return;
1144
1145         io_mapping_unmap(vma->iomap);
1146         vma->iomap = NULL;
1147 }
1148
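/*
 * i915_vma_revoke_mmap() zaps any userspace CPU mappings of this GGTT vma
 * so that the next access refaults, letting us refresh the domain and
 * fence tracking. Only map-and-fenceable vmas can carry userfaults.
 */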
1149 void i915_vma_revoke_mmap(struct i915_vma *vma)
1150 {
1151         struct drm_vma_offset_node *node;
1152         u64 vma_offset;
1153
1154         if (!i915_vma_has_userfault(vma))
1155                 return;
1156
1157         GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1158         GEM_BUG_ON(!vma->obj->userfault_count);
1159
1160         node = &vma->mmo->vma_node;
1161         vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
1162         unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1163                             drm_vma_node_offset_addr(node) + vma_offset,
1164                             vma->size,
1165                             1);
1166
1167         i915_vma_unset_userfault(vma);
1168         if (!--vma->obj->userfault_count)
1169                 list_del(&vma->obj->userfault_link);
1170 }
1171
1172 int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1173 {
1174         int err;
1175
1176         GEM_BUG_ON(!i915_vma_is_pinned(vma));
1177
1178         /* Wait for the vma to be bound before we start! */
1179         err = i915_request_await_active(rq, &vma->active,
1180                                         I915_ACTIVE_AWAIT_EXCL);
1181         if (err)
1182                 return err;
1183
1184         return i915_active_add_request(&vma->active, rq);
1185 }
1186
1187 int i915_vma_move_to_active(struct i915_vma *vma,
1188                             struct i915_request *rq,
1189                             unsigned int flags)
1190 {
1191         struct drm_i915_gem_object *obj = vma->obj;
1192         int err;
1193
1194         assert_object_held(obj);
1195
1196         err = __i915_vma_move_to_active(vma, rq);
1197         if (unlikely(err))
1198                 return err;
1199
1200         if (flags & EXEC_OBJECT_WRITE) {
1201                 struct intel_frontbuffer *front;
1202
1203                 front = __intel_frontbuffer_get(obj);
1204                 if (unlikely(front)) {
1205                         if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
1206                                 i915_active_add_request(&front->write, rq);
1207                         intel_frontbuffer_put(front);
1208                 }
1209
1210                 dma_resv_add_excl_fence(vma->resv, &rq->fence);
1211                 obj->write_domain = I915_GEM_DOMAIN_RENDER;
1212                 obj->read_domains = 0;
1213         } else {
1214                 err = dma_resv_reserve_shared(vma->resv, 1);
1215                 if (unlikely(err))
1216                         return err;
1217
1218                 dma_resv_add_shared_fence(vma->resv, &rq->fence);
1219                 obj->write_domain = 0;
1220         }
1221
1222         if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
1223                 i915_active_add_request(&vma->fence->active, rq);
1224
1225         obj->read_domains |= I915_GEM_GPU_DOMAINS;
1226         obj->mm.dirty = true;
1227
1228         GEM_BUG_ON(!i915_vma_is_active(vma));
1229         return 0;
1230 }
1231
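/*
 * __i915_vma_unbind() performs the actual teardown and requires the caller
 * to hold vm->mutex; i915_vma_unbind() below is the locked wrapper that
 * first waits optimistically for idleness, grabs a runtime-pm wakeref when
 * a GGTT binding may need its PTEs cleared, and then takes the mutex.
 */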
1232 int __i915_vma_unbind(struct i915_vma *vma)
1233 {
1234         int ret;
1235
1236         lockdep_assert_held(&vma->vm->mutex);
1237
1238         if (i915_vma_is_pinned(vma)) {
1239                 vma_print_allocator(vma, "is pinned");
1240                 return -EAGAIN;
1241         }
1242
1243         /*
1244          * After confirming that no one else is pinning this vma, wait for
1245          * any laggards who may have crept in during the wait (through
1246          * a residual pin skipping the vm->mutex) to complete.
1247          */
1248         ret = i915_vma_sync(vma);
1249         if (ret)
1250                 return ret;
1251
1252         if (!drm_mm_node_allocated(&vma->node))
1253                 return 0;
1254
1255         GEM_BUG_ON(i915_vma_is_pinned(vma));
1256         GEM_BUG_ON(i915_vma_is_active(vma));
1257
1258         if (i915_vma_is_map_and_fenceable(vma)) {
1259                 /* Force a pagefault for domain tracking on next user access */
1260                 i915_vma_revoke_mmap(vma);
1261
1262                 /*
1263                  * Check that we have flushed all writes through the GGTT
1264          * before the unbind; otherwise, due to the non-strict nature of those
1265          * indirect writes, they may end up referencing the GGTT PTE
1266          * after the unbind.
1267                  *
1268                  * Note that we may be concurrently poking at the GGTT_WRITE
1269                  * bit from set-domain, as we mark all GGTT vma associated
1270                  * with an object. We know this is for another vma, as we
1271                  * are currently unbinding this one -- so if this vma will be
1272                  * reused, it will be refaulted and have its dirty bit set
1273                  * before the next write.
1274                  */
1275                 i915_vma_flush_writes(vma);
1276
1277                 /* release the fence reg _after_ flushing */
1278                 i915_vma_revoke_fence(vma);
1279
1280                 __i915_vma_iounmap(vma);
1281                 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
1282         }
1283         GEM_BUG_ON(vma->fence);
1284         GEM_BUG_ON(i915_vma_has_userfault(vma));
1285
1286         if (likely(atomic_read(&vma->vm->open))) {
1287                 trace_i915_vma_unbind(vma);
1288                 vma->ops->unbind_vma(vma);
1289         }
1290         atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
1291                    &vma->flags);
1292
1293         i915_vma_detach(vma);
1294         vma_unbind_pages(vma);
1295
1296         drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
1297         return 0;
1298 }
1299
1300 int i915_vma_unbind(struct i915_vma *vma)
1301 {
1302         struct i915_address_space *vm = vma->vm;
1303         intel_wakeref_t wakeref = 0;
1304         int err;
1305
1306         if (!drm_mm_node_allocated(&vma->node))
1307                 return 0;
1308
1309         /* Optimistic wait before taking the mutex */
1310         err = i915_vma_sync(vma);
1311         if (err)
1312                 goto out_rpm;
1313
1314         if (i915_vma_is_pinned(vma)) {
1315                 vma_print_allocator(vma, "is pinned");
1316                 return -EAGAIN;
1317         }
1318
1319         if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
1320                 /* XXX not always required: nop_clear_range */
1321                 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
1322
1323         err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
1324         if (err)
1325                 goto out_rpm;
1326
1327         err = __i915_vma_unbind(vma);
1328         mutex_unlock(&vm->mutex);
1329
1330 out_rpm:
1331         if (wakeref)
1332                 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
1333         return err;
1334 }
1335
1336 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
1337 {
1338         i915_gem_object_make_unshrinkable(vma->obj);
1339         return vma;
1340 }
1341
1342 void i915_vma_make_shrinkable(struct i915_vma *vma)
1343 {
1344         i915_gem_object_make_shrinkable(vma->obj);
1345 }
1346
1347 void i915_vma_make_purgeable(struct i915_vma *vma)
1348 {
1349         i915_gem_object_make_purgeable(vma->obj);
1350 }
1351
1352 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1353 #include "selftests/i915_vma.c"
1354 #endif
1355
1356 static void i915_global_vma_shrink(void)
1357 {
1358         kmem_cache_shrink(global.slab_vmas);
1359 }
1360
1361 static void i915_global_vma_exit(void)
1362 {
1363         kmem_cache_destroy(global.slab_vmas);
1364 }
1365
1366 static struct i915_global_vma global = { {
1367         .shrink = i915_global_vma_shrink,
1368         .exit = i915_global_vma_exit,
1369 } };
1370
1371 int __init i915_global_vma_init(void)
1372 {
1373         global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
1374         if (!global.slab_vmas)
1375                 return -ENOMEM;
1376
1377         i915_global_register(&global.base);
1378         return 0;
1379 }