/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>
#include <drm/drm_gem.h>

#include "display/intel_frontbuffer.h"

#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
#include "i915_vma.h"

static struct i915_global_vma {
        struct i915_global base;
        struct kmem_cache *slab_vmas;
} global;

struct i915_vma *i915_vma_alloc(void)
{
        return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
}

void i915_vma_free(struct i915_vma *vma)
{
        kmem_cache_free(global.slab_vmas, vma);
}

#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)

#include <linux/stackdepot.h>

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
        unsigned long *entries;
        unsigned int nr_entries;
        char buf[512];

        if (!vma->node.stack) {
                DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
                                 vma->node.start, vma->node.size, reason);
                return;
        }

        nr_entries = stack_depot_fetch(vma->node.stack, &entries);
        stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
        DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
                         vma->node.start, vma->node.size, reason, buf);
}

#else

static void vma_print_allocator(struct i915_vma *vma, const char *reason)
{
}

#endif

static inline struct i915_vma *active_to_vma(struct i915_active *ref)
{
        return container_of(ref, typeof(struct i915_vma), active);
}

static int __i915_vma_active(struct i915_active *ref)
{
        return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
}

__i915_active_call
static void __i915_vma_retire(struct i915_active *ref)
{
        i915_vma_put(active_to_vma(ref));
}

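/*
 * Create a new vma for the (obj, vm, view) triplet and insert it into the
 * object's vma rbtree. If another thread races us and inserts a matching
 * vma first, the older instance wins and our allocation is discarded.
 */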
static struct i915_vma *
vma_create(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;
        struct rb_node *rb, **p;

        /* The aliasing_ppgtt should never be used directly! */
        GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);

        vma = i915_vma_alloc();
        if (vma == NULL)
                return ERR_PTR(-ENOMEM);

        kref_init(&vma->ref);
        mutex_init(&vma->pages_mutex);
        vma->vm = i915_vm_get(vm);
        vma->ops = &vm->vma_ops;
        vma->obj = obj;
        vma->resv = obj->base.resv;
        vma->size = obj->base.size;
        vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

        i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);

        /* Declare ourselves safe for use inside shrinkers */
        if (IS_ENABLED(CONFIG_LOCKDEP)) {
                fs_reclaim_acquire(GFP_KERNEL);
                might_lock(&vma->active.mutex);
                fs_reclaim_release(GFP_KERNEL);
        }

        INIT_LIST_HEAD(&vma->closed_link);

        if (view && view->type != I915_GGTT_VIEW_NORMAL) {
                vma->ggtt_view = *view;
                if (view->type == I915_GGTT_VIEW_PARTIAL) {
                        GEM_BUG_ON(range_overflows_t(u64,
                                                     view->partial.offset,
                                                     view->partial.size,
                                                     obj->base.size >> PAGE_SHIFT));
                        vma->size = view->partial.size;
                        vma->size <<= PAGE_SHIFT;
                        GEM_BUG_ON(vma->size > obj->base.size);
                } else if (view->type == I915_GGTT_VIEW_ROTATED) {
                        vma->size = intel_rotation_info_size(&view->rotated);
                        vma->size <<= PAGE_SHIFT;
                } else if (view->type == I915_GGTT_VIEW_REMAPPED) {
                        vma->size = intel_remapped_info_size(&view->remapped);
                        vma->size <<= PAGE_SHIFT;
                }
        }

        if (unlikely(vma->size > vm->total))
                goto err_vma;

        GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));

        spin_lock(&obj->vma.lock);

        if (i915_is_ggtt(vm)) {
                if (unlikely(overflows_type(vma->size, u32)))
                        goto err_unlock;

                vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
                                                      i915_gem_object_get_tiling(obj),
                                                      i915_gem_object_get_stride(obj));
                if (unlikely(vma->fence_size < vma->size || /* overflow */
                             vma->fence_size > vm->total))
                        goto err_unlock;

                GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));

                vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
                                                                i915_gem_object_get_tiling(obj),
                                                                i915_gem_object_get_stride(obj));
                GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));

                __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
        }

        rb = NULL;
        p = &obj->vma.tree.rb_node;
        while (*p) {
                struct i915_vma *pos;
                long cmp;

                rb = *p;
                pos = rb_entry(rb, struct i915_vma, obj_node);

                /*
                 * If the view already exists in the tree, another thread
                 * already created a matching vma, so return the older instance
                 * and dispose of ours.
                 */
                cmp = i915_vma_compare(pos, vm, view);
                if (cmp == 0) {
                        spin_unlock(&obj->vma.lock);
                        i915_vma_free(vma);
                        return pos;
                }

                if (cmp < 0)
                        p = &rb->rb_right;
                else
                        p = &rb->rb_left;
        }
        rb_link_node(&vma->obj_node, rb, p);
        rb_insert_color(&vma->obj_node, &obj->vma.tree);

        if (i915_vma_is_ggtt(vma))
                /*
                 * We put the GGTT vma at the start of the vma-list, followed
                 * by the ppGTT vma. This allows us to break early when
                 * iterating over only the GGTT vma for an object, see
                 * for_each_ggtt_vma().
                 */
                list_add(&vma->obj_link, &obj->vma.list);
        else
                list_add_tail(&vma->obj_link, &obj->vma.list);

        spin_unlock(&obj->vma.lock);

        return vma;

err_unlock:
        spin_unlock(&obj->vma.lock);
err_vma:
        i915_vma_free(vma);
        return ERR_PTR(-E2BIG);
}

static struct i915_vma *
vma_lookup(struct drm_i915_gem_object *obj,
           struct i915_address_space *vm,
           const struct i915_ggtt_view *view)
{
        struct rb_node *rb;

        rb = obj->vma.tree.rb_node;
        while (rb) {
                struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
                long cmp;

                cmp = i915_vma_compare(vma, vm, view);
                if (cmp == 0)
                        return vma;

                if (cmp < 0)
                        rb = rb->rb_right;
                else
                        rb = rb->rb_left;
        }

        return NULL;
}

/**
 * i915_vma_instance - return the singleton instance of the VMA
 * @obj: parent &struct drm_i915_gem_object to be mapped
 * @vm: address space in which the mapping is located
 * @view: additional mapping requirements
 *
 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
 * the same @view characteristics. If a match is not found, one is created.
 * Once created, the VMA is kept until either the object is freed, or the
 * address space is closed.
 *
 * Returns the vma, or an error pointer.
 */
struct i915_vma *
i915_vma_instance(struct drm_i915_gem_object *obj,
                  struct i915_address_space *vm,
                  const struct i915_ggtt_view *view)
{
        struct i915_vma *vma;

        GEM_BUG_ON(view && !i915_is_ggtt(vm));
        GEM_BUG_ON(!atomic_read(&vm->open));

        spin_lock(&obj->vma.lock);
        vma = vma_lookup(obj, vm, view);
        spin_unlock(&obj->vma.lock);

        /* vma_create() will resolve the race if another creates the vma */
        if (unlikely(!vma))
                vma = vma_create(obj, vm, view);

        GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
        return vma;
}

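/*
 * State for an asynchronous vma bind. The actual PTE insertion runs from a
 * dma_fence_work, allowing it to be ordered behind a prior fence (such as
 * the migration fence on the backing pages) without blocking the caller.
 */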
struct i915_vma_work {
        struct dma_fence_work base;
        struct i915_vma *vma;
        struct drm_i915_gem_object *pinned;
        struct i915_sw_dma_fence_cb cb;
        enum i915_cache_level cache_level;
        unsigned int flags;
};

static int __vma_bind(struct dma_fence_work *work)
{
        struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
        struct i915_vma *vma = vw->vma;
        int err;

        err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
        if (err)
                atomic_or(I915_VMA_ERROR, &vma->flags);

        return err;
}

static void __vma_release(struct dma_fence_work *work)
{
        struct i915_vma_work *vw = container_of(work, typeof(*vw), base);

        if (vw->pinned)
                __i915_gem_object_unpin_pages(vw->pinned);
}

static const struct dma_fence_work_ops bind_ops = {
        .name = "bind",
        .work = __vma_bind,
        .release = __vma_release,
};

struct i915_vma_work *i915_vma_work(void)
{
        struct i915_vma_work *vw;

        vw = kzalloc(sizeof(*vw), GFP_KERNEL);
        if (!vw)
                return NULL;

        dma_fence_work_init(&vw->base, &bind_ops);
        vw->base.dma.error = -EAGAIN; /* disable the worker by default */

        return vw;
}

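/*
 * Flush any pending asynchronous bind by waiting on the vma's exclusive
 * fence; a no-op if the vma was bound synchronously.
 */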
int i915_vma_wait_for_bind(struct i915_vma *vma)
{
        int err = 0;

        if (rcu_access_pointer(vma->active.excl.fence)) {
                struct dma_fence *fence;

                rcu_read_lock();
                fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
                rcu_read_unlock();
                if (fence) {
                        err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
                        dma_fence_put(fence);
                }
        }

        return err;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 * @work: preallocated worker for allocating and binding the PTE
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma,
                  enum i915_cache_level cache_level,
                  u32 flags,
                  struct i915_vma_work *work)
{
        u32 bind_flags;
        u32 vma_flags;
        int ret;

        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(vma->size > vma->node.size);

        if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
                                              vma->node.size,
                                              vma->vm->total)))
                return -ENODEV;

        if (GEM_DEBUG_WARN_ON(!flags))
                return -EINVAL;

        bind_flags = flags;
        bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

        vma_flags = atomic_read(&vma->flags);
        vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
        if (flags & PIN_UPDATE)
                bind_flags |= vma_flags;
        else
                bind_flags &= ~vma_flags;
        if (bind_flags == 0)
                return 0;

        GEM_BUG_ON(!vma->pages);

        trace_i915_vma_bind(vma, bind_flags);
        if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
                struct dma_fence *prev;

                work->vma = vma;
                work->cache_level = cache_level;
                work->flags = bind_flags | I915_VMA_ALLOC;

                /*
                 * Note we only want to chain up to the migration fence on
                 * the pages (not the object itself). As we don't track that
                 * yet, we have to use the exclusive fence instead.
                 *
                 * Also note that we do not want to track the async vma as
                 * part of the obj->resv->excl_fence as it only affects
                 * execution and not the content or lifetime of the object's
                 * backing store.
                 */
                prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
                if (prev) {
                        __i915_sw_fence_await_dma_fence(&work->base.chain,
                                                        prev,
                                                        &work->cb);
                        dma_fence_put(prev);
                }

                work->base.dma.error = 0; /* enable the queue_work() */

                if (vma->obj) {
                        __i915_gem_object_pin_pages(vma->obj);
                        work->pinned = vma->obj;
                }
        } else {
                ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
                if (ret)
                        return ret;
        }

        atomic_or(bind_flags, &vma->flags);
        return 0;
}

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
        void __iomem *ptr;
        int err;

        if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
                err = -ENODEV;
                goto err;
        }

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));

        ptr = READ_ONCE(vma->iomap);
        if (ptr == NULL) {
                ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
                                        vma->node.start,
                                        vma->node.size);
                if (ptr == NULL) {
                        err = -ENOMEM;
                        goto err;
                }

                if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
                        io_mapping_unmap(ptr);
                        ptr = vma->iomap;
                }
        }

        __i915_vma_pin(vma);

        err = i915_vma_pin_fence(vma);
        if (err)
                goto err_unpin;

        i915_vma_set_ggtt_write(vma);

        /* NB Access through the GTT requires the device to be awake. */
        return ptr;

err_unpin:
        __i915_vma_unpin(vma);
err:
        return IO_ERR_PTR(err);
}

void i915_vma_flush_writes(struct i915_vma *vma)
{
        if (i915_vma_unset_ggtt_write(vma))
                intel_gt_flush_ggtt_writes(vma->vm->gt);
}

void i915_vma_unpin_iomap(struct i915_vma *vma)
{
        GEM_BUG_ON(vma->iomap == NULL);

        i915_vma_flush_writes(vma);

        i915_vma_unpin_fence(vma);
        i915_vma_unpin(vma);
}

void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
{
        struct i915_vma *vma;
        struct drm_i915_gem_object *obj;

        vma = fetch_and_zero(p_vma);
        if (!vma)
                return;

        obj = vma->obj;
        GEM_BUG_ON(!obj);

        i915_vma_unpin(vma);

        if (flags & I915_VMA_RELEASE_MAP)
                i915_gem_object_unpin_map(obj);

        i915_gem_object_put(obj);
}

bool i915_vma_misplaced(const struct i915_vma *vma,
                        u64 size, u64 alignment, u64 flags)
{
        if (!drm_mm_node_allocated(&vma->node))
                return false;

        if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
                return true;

        if (vma->node.size < size)
                return true;

        GEM_BUG_ON(alignment && !is_power_of_2(alignment));
        if (alignment && !IS_ALIGNED(vma->node.start, alignment))
                return true;

        if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
                return true;

        if (flags & PIN_OFFSET_BIAS &&
            vma->node.start < (flags & PIN_OFFSET_MASK))
                return true;

        if (flags & PIN_OFFSET_FIXED &&
            vma->node.start != (flags & PIN_OFFSET_MASK))
                return true;

        return false;
}

void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
        bool mappable, fenceable;

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON(!vma->fence_size);

        fenceable = (vma->node.size >= vma->fence_size &&
                     IS_ALIGNED(vma->node.start, vma->fence_alignment));

        mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;

        if (mappable && fenceable)
                set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
        else
                clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
}

bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
{
        struct drm_mm_node *node = &vma->node;
        struct drm_mm_node *other;

        /*
         * On some machines we have to be careful when putting differing types
         * of snoopable memory together to avoid the prefetcher crossing memory
         * domains and dying. During vm initialisation, we decide whether or not
         * these constraints apply and set the drm_mm.color_adjust
         * appropriately.
         */
        if (!i915_vm_has_cache_coloring(vma->vm))
                return true;

        /* Only valid to be called on an already inserted vma */
        GEM_BUG_ON(!drm_mm_node_allocated(node));
        GEM_BUG_ON(list_empty(&node->node_list));

        other = list_prev_entry(node, node_list);
        if (i915_node_color_differs(other, color) &&
            !drm_mm_hole_follows(other))
                return false;

        other = list_next_entry(node, node_list);
        if (i915_node_color_differs(other, color) &&
            !drm_mm_hole_follows(node))
                return false;

        return true;
}

/**
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
 * @size: requested size in bytes (can be larger than the VMA)
 * @alignment: required alignment
 * @flags: mask of PIN_* flags to use
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
 * preferably the oldest idle entry, to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
 */
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
        unsigned long color;
        u64 start, end;
        int ret;

        GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
        GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

        size = max(size, vma->size);
        alignment = max(alignment, vma->display_alignment);
        if (flags & PIN_MAPPABLE) {
                size = max_t(typeof(size), size, vma->fence_size);
                alignment = max_t(typeof(alignment),
                                  alignment, vma->fence_alignment);
        }

        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
        GEM_BUG_ON(!is_power_of_2(alignment));

        start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
        GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));

        end = vma->vm->total;
        if (flags & PIN_MAPPABLE)
                end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
        if (flags & PIN_ZONE_4G)
                end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
        GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

        /*
         * If binding the object/GGTT view requires more space than the entire
         * aperture has, reject it early before evicting everything in a vain
         * attempt to find space.
         */
        if (size > end) {
                DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
                          size, flags & PIN_MAPPABLE ? "mappable" : "total",
                          end);
                return -ENOSPC;
        }

        color = 0;
        if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
                color = vma->obj->cache_level;

        if (flags & PIN_OFFSET_FIXED) {
                u64 offset = flags & PIN_OFFSET_MASK;

                if (!IS_ALIGNED(offset, alignment) ||
                    range_overflows(offset, size, end))
                        return -EINVAL;

                ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
                                           size, offset, color,
                                           flags);
                if (ret)
                        return ret;
        } else {
                /*
                 * We only support huge gtt pages through the 48b PPGTT,
                 * however we also don't want to force any alignment for
                 * objects which need to be tightly packed into the low 32bits.
                 *
                 * Note that we assume that GGTT are limited to 4GiB for the
                 * foreseeable future. See also i915_ggtt_offset().
                 */
                if (upper_32_bits(end - 1) &&
                    vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
                        /*
                         * We can't mix 64K and 4K PTEs in the same page-table
                         * (2M block), and so to avoid the ugliness and
                         * complexity of coloring we opt for just aligning 64K
                         * objects to 2M.
                         */
                        u64 page_alignment =
                                rounddown_pow_of_two(vma->page_sizes.sg |
                                                     I915_GTT_PAGE_SIZE_2M);

                        /*
                         * Check we don't expand for the limited Global GTT
                         * (mappable aperture is even more precious!). This
                         * also checks that we exclude the aliasing-ppgtt.
                         */
                        GEM_BUG_ON(i915_vma_is_ggtt(vma));

                        alignment = max(alignment, page_alignment);

                        if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
                                size = round_up(size, I915_GTT_PAGE_SIZE_2M);
                }

                ret = i915_gem_gtt_insert(vma->vm, &vma->node,
                                          size, alignment, color,
                                          start, end, flags);
                if (ret)
                        return ret;

                GEM_BUG_ON(vma->node.start < start);
                GEM_BUG_ON(vma->node.start + vma->node.size > end);
        }
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));

        list_add_tail(&vma->vm_link, &vma->vm->bound_list);

        return 0;
}

static void
i915_vma_detach(struct i915_vma *vma)
{
        GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
        GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));

        /*
         * And finally now the object is completely decoupled from this
         * vma, we can drop its hold on the backing storage and allow
         * it to be reaped by the shrinker.
         */
        list_del(&vma->vm_link);
}

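/*
 * Try to take a pin on an already bound vma without having to take
 * vm->mutex: atomically bump the pin count, but only while the required
 * bindings are present and no bind error has occurred. Falls back to a
 * locked re-check when the vma is bound but currently unpinned.
 */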
static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
{
        unsigned int bound;
        bool pinned = true;

        bound = atomic_read(&vma->flags);
        do {
                if (unlikely(flags & ~bound))
                        return false;

                if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
                        return false;

                if (!(bound & I915_VMA_PIN_MASK))
                        goto unpinned;

                GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
        } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));

        return true;

unpinned:
        /*
         * If pin_count==0, but we are bound, check under the lock to avoid
         * racing with a concurrent i915_vma_unbind().
         */
        mutex_lock(&vma->vm->mutex);
        do {
                if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
                        pinned = false;
                        break;
                }

                if (unlikely(flags & ~bound)) {
                        pinned = false;
                        break;
                }
        } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
        mutex_unlock(&vma->vm->mutex);

        return pinned;
}

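/*
 * vma->pages_count is split in two: the low bits count transient page pins,
 * the upper bits (above I915_VMA_PAGES_BIAS) count bindings. The first
 * caller allocates the backing pages under pages_mutex; later callers just
 * take another reference.
 */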
static int vma_get_pages(struct i915_vma *vma)
{
        int err = 0;

        if (atomic_add_unless(&vma->pages_count, 1, 0))
                return 0;

        /* Allocations ahoy! */
        if (mutex_lock_interruptible(&vma->pages_mutex))
                return -EINTR;

        if (!atomic_read(&vma->pages_count)) {
                if (vma->obj) {
                        err = i915_gem_object_pin_pages(vma->obj);
                        if (err)
                                goto unlock;
                }

                err = vma->ops->set_pages(vma);
                if (err) {
                        if (vma->obj)
                                i915_gem_object_unpin_pages(vma->obj);
                        goto unlock;
                }
        }
        atomic_inc(&vma->pages_count);

unlock:
        mutex_unlock(&vma->pages_mutex);

        return err;
}

static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
{
        /* We allocate under vma_get_pages, so beware the shrinker */
        mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
        GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
        if (atomic_sub_return(count, &vma->pages_count) == 0) {
                vma->ops->clear_pages(vma);
                GEM_BUG_ON(vma->pages);
                if (vma->obj)
                        i915_gem_object_unpin_pages(vma->obj);
        }
        mutex_unlock(&vma->pages_mutex);
}

static void vma_put_pages(struct i915_vma *vma)
{
        if (atomic_add_unless(&vma->pages_count, -1, 1))
                return;

        __vma_put_pages(vma, 1);
}

static void vma_unbind_pages(struct i915_vma *vma)
{
        unsigned int count;

        lockdep_assert_held(&vma->vm->mutex);

        /* The upper portion of pages_count is the number of bindings */
        count = atomic_read(&vma->pages_count);
        count >>= I915_VMA_PAGES_BIAS;
        GEM_BUG_ON(!count);

        __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
}

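/*
 * Pin (and if necessary insert and bind) a vma. The fast path takes a pin
 * on an existing binding without locking; otherwise the pages are acquired,
 * vm->mutex is taken, and the vma is inserted into the address space and
 * bound, possibly asynchronously via an i915_vma_work.
 */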
int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
{
        struct i915_vma_work *work = NULL;
        intel_wakeref_t wakeref = 0;
        unsigned int bound;
        int err;

        BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
        BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);

        GEM_BUG_ON(flags & PIN_UPDATE);
        GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));

        /* First try and grab the pin without rebinding the vma */
        if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
                return 0;

        err = vma_get_pages(vma);
        if (err)
                return err;

        if (flags & vma->vm->bind_async_flags) {
                work = i915_vma_work();
                if (!work) {
                        err = -ENOMEM;
                        goto err_pages;
                }
        }

        if (flags & PIN_GLOBAL)
                wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);

        /*
         * Differentiate between user/kernel vma inside the aliasing-ppgtt.
         *
         * We conflate the Global GTT with the user's vma when using the
         * aliasing-ppgtt, but it is still vitally important to try and
         * keep the use cases distinct. For example, userptr objects are
         * not allowed inside the Global GTT as that will cause lock
         * inversions when we have to evict them from the mmu_notifier
         * callbacks - but they are allowed to be part of the user ppGTT
         * which can never be mapped. As such we try to give the distinct
         * users of the same mutex distinct lockclasses [equivalent to how
         * we keep i915_ggtt and i915_ppgtt separate].
         *
         * NB this may cause us to mask real lock inversions -- while the
         * code is safe today, lockdep may not be able to spot future
         * transgressions.
         */
        err = mutex_lock_interruptible_nested(&vma->vm->mutex,
                                              !(flags & PIN_GLOBAL));
        if (err)
                goto err_fence;

        /* No more allocations allowed now we hold vm->mutex */

        if (unlikely(i915_vma_is_closed(vma))) {
                err = -ENOENT;
                goto err_unlock;
        }

        bound = atomic_read(&vma->flags);
        if (unlikely(bound & I915_VMA_ERROR)) {
                err = -ENOMEM;
                goto err_unlock;
        }

        if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
                err = -EAGAIN; /* pins are meant to be fairly temporary */
                goto err_unlock;
        }

        if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
                __i915_vma_pin(vma);
                goto err_unlock;
        }

        err = i915_active_acquire(&vma->active);
        if (err)
                goto err_unlock;

        if (!(bound & I915_VMA_BIND_MASK)) {
                err = i915_vma_insert(vma, size, alignment, flags);
                if (err)
                        goto err_active;

                if (i915_is_ggtt(vma->vm))
                        __i915_vma_set_map_and_fenceable(vma);
        }

        GEM_BUG_ON(!vma->pages);
        err = i915_vma_bind(vma,
                            vma->obj ? vma->obj->cache_level : 0,
                            flags, work);
        if (err)
                goto err_remove;

        /* There should only be at most 2 active bindings (user, global) */
        GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
        atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
        list_move_tail(&vma->vm_link, &vma->vm->bound_list);

        __i915_vma_pin(vma);
        GEM_BUG_ON(!i915_vma_is_pinned(vma));
        GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
        GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));

err_remove:
        if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
                i915_vma_detach(vma);
                drm_mm_remove_node(&vma->node);
        }
err_active:
        i915_active_release(&vma->active);
err_unlock:
        mutex_unlock(&vma->vm->mutex);
err_fence:
        if (work)
                dma_fence_work_commit_imm(&work->base);
        if (wakeref)
                intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
err_pages:
        vma_put_pages(vma);
        return err;
}

static void flush_idle_contexts(struct intel_gt *gt)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, gt, id)
                intel_engine_flush_barriers(engine);

        intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
}

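/*
 * Pin a vma into the Global GTT, retrying on -ENOSPC: first flush idle
 * contexts and wait for the GT to idle, then evict the vm wholesale,
 * before trying the pin again.
 */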
int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
{
        struct i915_address_space *vm = vma->vm;
        int err;

        GEM_BUG_ON(!i915_vma_is_ggtt(vma));

        do {
                err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
                if (err != -ENOSPC) {
                        if (!err) {
                                err = i915_vma_wait_for_bind(vma);
                                if (err)
                                        i915_vma_unpin(vma);
                        }
                        return err;
                }

                /* Unlike i915_vma_pin, we don't take no for an answer! */
                flush_idle_contexts(vm->gt);
                if (mutex_lock_interruptible(&vm->mutex) == 0) {
                        i915_gem_evict_vm(vm);
                        mutex_unlock(&vm->mutex);
                }
        } while (1);
}

static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
{
        /*
         * We defer actually closing, unbinding and destroying the VMA until
         * the next idle point, or if the object is freed in the meantime. By
         * postponing the unbind, we allow for it to be resurrected by the
         * client, avoiding the work required to rebind the VMA. This is
         * advantageous for DRI, where the client/server pass objects
         * between themselves, temporarily opening a local VMA to the
         * object, and then closing it again. The same object is then reused
         * on the next frame (or two, depending on the depth of the swap queue)
         * causing us to rebind the VMA once more. This ends up being a lot
         * of wasted work for the steady state.
         */
        GEM_BUG_ON(i915_vma_is_closed(vma));
        list_add(&vma->closed_link, &gt->closed_vma);
}

void i915_vma_close(struct i915_vma *vma)
{
        struct intel_gt *gt = vma->vm->gt;
        unsigned long flags;

        if (i915_vma_is_ggtt(vma))
                return;

        GEM_BUG_ON(!atomic_read(&vma->open_count));
        if (atomic_dec_and_lock_irqsave(&vma->open_count,
                                        &gt->closed_lock,
                                        flags)) {
                __vma_close(vma, gt);
                spin_unlock_irqrestore(&gt->closed_lock, flags);
        }
}

static void __i915_vma_remove_closed(struct i915_vma *vma)
{
        struct intel_gt *gt = vma->vm->gt;

        spin_lock_irq(&gt->closed_lock);
        list_del_init(&vma->closed_link);
        spin_unlock_irq(&gt->closed_lock);
}

void i915_vma_reopen(struct i915_vma *vma)
{
        if (i915_vma_is_closed(vma))
                __i915_vma_remove_closed(vma);
}

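/*
 * Final kref release: forcibly unbind the vma (dropping any residual pins),
 * unlink it from its object and address space, and free it.
 */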
void i915_vma_release(struct kref *ref)
{
        struct i915_vma *vma = container_of(ref, typeof(*vma), ref);

        if (drm_mm_node_allocated(&vma->node)) {
                mutex_lock(&vma->vm->mutex);
                atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
                WARN_ON(__i915_vma_unbind(vma));
                mutex_unlock(&vma->vm->mutex);
                GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
        }
        GEM_BUG_ON(i915_vma_is_active(vma));

        if (vma->obj) {
                struct drm_i915_gem_object *obj = vma->obj;

                spin_lock(&obj->vma.lock);
                list_del(&vma->obj_link);
                rb_erase(&vma->obj_node, &obj->vma.tree);
                spin_unlock(&obj->vma.lock);
        }

        __i915_vma_remove_closed(vma);
        i915_vm_put(vma->vm);

        i915_active_fini(&vma->active);
        i915_vma_free(vma);
}

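/*
 * Called when the GT is parked (idle): reap all vma on the closed list,
 * taking temporary references on their object and address space so that
 * each can be safely destroyed outside the closed_lock.
 */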
void i915_vma_parked(struct intel_gt *gt)
{
        struct i915_vma *vma, *next;
        LIST_HEAD(closed);

        spin_lock_irq(&gt->closed_lock);
        list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
                struct drm_i915_gem_object *obj = vma->obj;
                struct i915_address_space *vm = vma->vm;

                /* XXX All to avoid keeping a reference on i915_vma itself */

                if (!kref_get_unless_zero(&obj->base.refcount))
                        continue;

                if (!i915_vm_tryopen(vm)) {
                        i915_gem_object_put(obj);
                        continue;
                }

                list_move(&vma->closed_link, &closed);
        }
        spin_unlock_irq(&gt->closed_lock);

        /* As the GT is held idle, no vma can be reopened as we destroy them */
        list_for_each_entry_safe(vma, next, &closed, closed_link) {
                struct drm_i915_gem_object *obj = vma->obj;
                struct i915_address_space *vm = vma->vm;

                INIT_LIST_HEAD(&vma->closed_link);
                __i915_vma_put(vma);

                i915_gem_object_put(obj);
                i915_vm_close(vm);
        }
}

static void __i915_vma_iounmap(struct i915_vma *vma)
{
        GEM_BUG_ON(i915_vma_is_pinned(vma));

        if (vma->iomap == NULL)
                return;

        io_mapping_unmap(vma->iomap);
        vma->iomap = NULL;
}

void i915_vma_revoke_mmap(struct i915_vma *vma)
{
        struct drm_vma_offset_node *node;
        u64 vma_offset;

        if (!i915_vma_has_userfault(vma))
                return;

        GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
        GEM_BUG_ON(!vma->obj->userfault_count);

        node = &vma->mmo->vma_node;
        vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
        unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
                            drm_vma_node_offset_addr(node) + vma_offset,
                            vma->size,
                            1);

        i915_vma_unset_userfault(vma);
        if (!--vma->obj->userfault_count)
                list_del(&vma->obj->userfault_link);
}

int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
{
        int err;

        GEM_BUG_ON(!i915_vma_is_pinned(vma));

        /* Wait for the vma to be bound before we start! */
        err = i915_request_await_active(rq, &vma->active,
                                        I915_ACTIVE_AWAIT_EXCL);
        if (err)
                return err;

        return i915_active_add_request(&vma->active, rq);
}

int i915_vma_move_to_active(struct i915_vma *vma,
                            struct i915_request *rq,
                            unsigned int flags)
{
        struct drm_i915_gem_object *obj = vma->obj;
        int err;

        assert_object_held(obj);

        err = __i915_vma_move_to_active(vma, rq);
        if (unlikely(err))
                return err;

        if (flags & EXEC_OBJECT_WRITE) {
                struct intel_frontbuffer *front;

                front = __intel_frontbuffer_get(obj);
                if (unlikely(front)) {
                        if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
                                i915_active_add_request(&front->write, rq);
                        intel_frontbuffer_put(front);
                }

                dma_resv_add_excl_fence(vma->resv, &rq->fence);
                obj->write_domain = I915_GEM_DOMAIN_RENDER;
                obj->read_domains = 0;
        } else {
                err = dma_resv_reserve_shared(vma->resv, 1);
                if (unlikely(err))
                        return err;

                dma_resv_add_shared_fence(vma->resv, &rq->fence);
                obj->write_domain = 0;
        }

        if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
                i915_active_add_request(&vma->fence->active, rq);

        obj->read_domains |= I915_GEM_GPU_DOMAINS;
        obj->mm.dirty = true;

        GEM_BUG_ON(!i915_vma_is_active(vma));
        return 0;
}

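/*
 * Unbind the vma from its address space: callers must hold vm->mutex and
 * have dropped all pins. For a fenceable GGTT vma, userspace mmaps and the
 * fence register are revoked before the PTEs are torn down and the drm_mm
 * node released.
 */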
int __i915_vma_unbind(struct i915_vma *vma)
{
        int ret;

        lockdep_assert_held(&vma->vm->mutex);

        if (i915_vma_is_pinned(vma)) {
                vma_print_allocator(vma, "is pinned");
                return -EAGAIN;
        }

        /*
         * After confirming that no one else is pinning this vma, wait for
         * any laggards who may have crept in during the wait (through
         * a residual pin skipping the vm->mutex) to complete.
         */
        ret = i915_vma_sync(vma);
        if (ret)
                return ret;

        if (!drm_mm_node_allocated(&vma->node))
                return 0;

        GEM_BUG_ON(i915_vma_is_pinned(vma));
        GEM_BUG_ON(i915_vma_is_active(vma));

        if (i915_vma_is_map_and_fenceable(vma)) {
                /* Force a pagefault for domain tracking on next user access */
                i915_vma_revoke_mmap(vma);

                /*
                 * Check that we have flushed all writes through the GGTT
                 * before the unbind; otherwise, due to the non-strict nature
                 * of those indirect writes, they may end up referencing the
                 * GGTT PTE after the unbind.
                 *
                 * Note that we may be concurrently poking at the GGTT_WRITE
                 * bit from set-domain, as we mark all GGTT vma associated
                 * with an object. We know this is for another vma, as we
                 * are currently unbinding this one -- so if this vma will be
                 * reused, it will be refaulted and have its dirty bit set
                 * before the next write.
                 */
                i915_vma_flush_writes(vma);

                /* release the fence reg _after_ flushing */
                i915_vma_revoke_fence(vma);

                __i915_vma_iounmap(vma);
                clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
        }
        GEM_BUG_ON(vma->fence);
        GEM_BUG_ON(i915_vma_has_userfault(vma));

        if (likely(atomic_read(&vma->vm->open))) {
                trace_i915_vma_unbind(vma);
                vma->ops->unbind_vma(vma);
        }
        atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
                   &vma->flags);

        i915_vma_detach(vma);
        vma_unbind_pages(vma);

        drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
        return 0;
}

int i915_vma_unbind(struct i915_vma *vma)
{
        struct i915_address_space *vm = vma->vm;
        intel_wakeref_t wakeref = 0;
        int err;

        if (!drm_mm_node_allocated(&vma->node))
                return 0;

        /* Optimistic wait before taking the mutex */
        err = i915_vma_sync(vma);
        if (err)
                goto out_rpm;

        if (i915_vma_is_pinned(vma)) {
                vma_print_allocator(vma, "is pinned");
                return -EAGAIN;
        }

        if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
                /* XXX not always required: nop_clear_range */
                wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

        err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
        if (err)
                goto out_rpm;

        err = __i915_vma_unbind(vma);
        mutex_unlock(&vm->mutex);

out_rpm:
        if (wakeref)
                intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
        return err;
}

struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
        i915_gem_object_make_unshrinkable(vma->obj);
        return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
        i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
        i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

static void i915_global_vma_shrink(void)
{
        kmem_cache_shrink(global.slab_vmas);
}

static void i915_global_vma_exit(void)
{
        kmem_cache_destroy(global.slab_vmas);
}

static struct i915_global_vma global = { {
        .shrink = i915_global_vma_shrink,
        .exit = i915_global_vma_exit,
} };

int __init i915_global_vma_init(void)
{
        global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
        if (!global.slab_vmas)
                return -ENOMEM;

        i915_global_register(&global.base);
        return 0;
}