drivers/gpu/drm/i915/i915_vma.c
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/sched/mm.h>
26 #include <drm/drm_gem.h>
27
28 #include "display/intel_frontbuffer.h"
29
30 #include "gt/intel_engine.h"
31 #include "gt/intel_engine_heartbeat.h"
32 #include "gt/intel_gt.h"
33 #include "gt/intel_gt_requests.h"
34
35 #include "i915_drv.h"
36 #include "i915_globals.h"
37 #include "i915_sw_fence_work.h"
38 #include "i915_trace.h"
39 #include "i915_vma.h"
40
41 static struct i915_global_vma {
42         struct i915_global base;
43         struct kmem_cache *slab_vmas;
44 } global;
45
46 struct i915_vma *i915_vma_alloc(void)
47 {
48         return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
49 }
50
51 void i915_vma_free(struct i915_vma *vma)
52 {
53         return kmem_cache_free(global.slab_vmas, vma);
54 }
55
56 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
57
58 #include <linux/stackdepot.h>
59
60 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
61 {
62         unsigned long *entries;
63         unsigned int nr_entries;
64         char buf[512];
65
66         if (!vma->node.stack) {
67                 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
68                                  vma->node.start, vma->node.size, reason);
69                 return;
70         }
71
72         nr_entries = stack_depot_fetch(vma->node.stack, &entries);
73         stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
74         DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
75                          vma->node.start, vma->node.size, reason, buf);
76 }
77
78 #else
79
80 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
81 {
82 }
83
84 #endif
85
86 static inline struct i915_vma *active_to_vma(struct i915_active *ref)
87 {
88         return container_of(ref, typeof(struct i915_vma), active);
89 }
90
91 static int __i915_vma_active(struct i915_active *ref)
92 {
93         return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
94 }
95
96 __i915_active_call
97 static void __i915_vma_retire(struct i915_active *ref)
98 {
99         i915_vma_put(active_to_vma(ref));
100 }
101
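/*
 * vma_create - allocate and initialise a new vma for @obj inside @vm, sized
 * according to @view, and insert it into the object's vma rb-tree and list.
 * If another thread raced us and already created a matching vma, the
 * existing instance is returned and the new allocation is discarded.
 */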
102 static struct i915_vma *
103 vma_create(struct drm_i915_gem_object *obj,
104            struct i915_address_space *vm,
105            const struct i915_ggtt_view *view)
106 {
107         struct i915_vma *vma;
108         struct rb_node *rb, **p;
109
110         /* The aliasing_ppgtt should never be used directly! */
111         GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
112
113         vma = i915_vma_alloc();
114         if (vma == NULL)
115                 return ERR_PTR(-ENOMEM);
116
117         kref_init(&vma->ref);
118         mutex_init(&vma->pages_mutex);
119         vma->vm = i915_vm_get(vm);
120         vma->ops = &vm->vma_ops;
121         vma->obj = obj;
122         vma->resv = obj->base.resv;
123         vma->size = obj->base.size;
124         vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
125
126         i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire);
127
128         /* Declare ourselves safe for use inside shrinkers */
129         if (IS_ENABLED(CONFIG_LOCKDEP)) {
130                 fs_reclaim_acquire(GFP_KERNEL);
131                 might_lock(&vma->active.mutex);
132                 fs_reclaim_release(GFP_KERNEL);
133         }
134
135         INIT_LIST_HEAD(&vma->closed_link);
136
137         if (view && view->type != I915_GGTT_VIEW_NORMAL) {
138                 vma->ggtt_view = *view;
139                 if (view->type == I915_GGTT_VIEW_PARTIAL) {
140                         GEM_BUG_ON(range_overflows_t(u64,
141                                                      view->partial.offset,
142                                                      view->partial.size,
143                                                      obj->base.size >> PAGE_SHIFT));
144                         vma->size = view->partial.size;
145                         vma->size <<= PAGE_SHIFT;
146                         GEM_BUG_ON(vma->size > obj->base.size);
147                 } else if (view->type == I915_GGTT_VIEW_ROTATED) {
148                         vma->size = intel_rotation_info_size(&view->rotated);
149                         vma->size <<= PAGE_SHIFT;
150                 } else if (view->type == I915_GGTT_VIEW_REMAPPED) {
151                         vma->size = intel_remapped_info_size(&view->remapped);
152                         vma->size <<= PAGE_SHIFT;
153                 }
154         }
155
156         if (unlikely(vma->size > vm->total))
157                 goto err_vma;
158
159         GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
160
161         if (i915_is_ggtt(vm)) {
162                 if (unlikely(overflows_type(vma->size, u32)))
163                         goto err_vma;
164
165                 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
166                                                       i915_gem_object_get_tiling(obj),
167                                                       i915_gem_object_get_stride(obj));
168                 if (unlikely(vma->fence_size < vma->size || /* overflow */
169                              vma->fence_size > vm->total))
170                         goto err_vma;
171
172                 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
173
174                 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
175                                                                 i915_gem_object_get_tiling(obj),
176                                                                 i915_gem_object_get_stride(obj));
177                 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
178
179                 __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
180         }
181
182         spin_lock(&obj->vma.lock);
183
184         rb = NULL;
185         p = &obj->vma.tree.rb_node;
186         while (*p) {
187                 struct i915_vma *pos;
188                 long cmp;
189
190                 rb = *p;
191                 pos = rb_entry(rb, struct i915_vma, obj_node);
192
193                 /*
194                  * If the view already exists in the tree, another thread
195                  * already created a matching vma, so return the older instance
196                  * and dispose of ours.
197                  */
198                 cmp = i915_vma_compare(pos, vm, view);
199                 if (cmp == 0) {
200                         spin_unlock(&obj->vma.lock);
201                         i915_vma_free(vma);
202                         return pos;
203                 }
204
205                 if (cmp < 0)
206                         p = &rb->rb_right;
207                 else
208                         p = &rb->rb_left;
209         }
210         rb_link_node(&vma->obj_node, rb, p);
211         rb_insert_color(&vma->obj_node, &obj->vma.tree);
212
213         if (i915_vma_is_ggtt(vma))
214                 /*
215                  * We put the GGTT vma at the start of the vma-list, followed
216                  * by the ppGGTT vma. This allows us to break early when
217                  * iterating over only the GGTT vma for an object, see
218                  * for_each_ggtt_vma()
219                  */
220                 list_add(&vma->obj_link, &obj->vma.list);
221         else
222                 list_add_tail(&vma->obj_link, &obj->vma.list);
223
224         spin_unlock(&obj->vma.lock);
225
226         return vma;
227
228 err_vma:
229         i915_vma_free(vma);
230         return ERR_PTR(-E2BIG);
231 }
232
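/*
 * vma_lookup - walk the object's vma rb-tree (under obj->vma.lock) for a vma
 * matching the (vm, view) pair, returning NULL if none has been created yet.
 */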
233 static struct i915_vma *
234 vma_lookup(struct drm_i915_gem_object *obj,
235            struct i915_address_space *vm,
236            const struct i915_ggtt_view *view)
237 {
238         struct rb_node *rb;
239
240         rb = obj->vma.tree.rb_node;
241         while (rb) {
242                 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
243                 long cmp;
244
245                 cmp = i915_vma_compare(vma, vm, view);
246                 if (cmp == 0)
247                         return vma;
248
249                 if (cmp < 0)
250                         rb = rb->rb_right;
251                 else
252                         rb = rb->rb_left;
253         }
254
255         return NULL;
256 }
257
258 /**
259  * i915_vma_instance - return the singleton instance of the VMA
260  * @obj: parent &struct drm_i915_gem_object to be mapped
261  * @vm: address space in which the mapping is located
262  * @view: additional mapping requirements
263  *
264  * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
265  * the same @view characteristics. If a match is not found, one is created.
266  * Once created, the VMA is kept until either the object is freed, or the
267  * address space is closed.
268  *
269  * Returns the vma, or an error pointer.
270  */
271 struct i915_vma *
272 i915_vma_instance(struct drm_i915_gem_object *obj,
273                   struct i915_address_space *vm,
274                   const struct i915_ggtt_view *view)
275 {
276         struct i915_vma *vma;
277
278         GEM_BUG_ON(view && !i915_is_ggtt(vm));
279         GEM_BUG_ON(!atomic_read(&vm->open));
280
281         spin_lock(&obj->vma.lock);
282         vma = vma_lookup(obj, vm, view);
283         spin_unlock(&obj->vma.lock);
284
285         /* vma_create() will resolve the race if another creates the vma */
286         if (unlikely(!vma))
287                 vma = vma_create(obj, vm, view);
288
289         GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
290         return vma;
291 }
292
293 struct i915_vma_work {
294         struct dma_fence_work base;
295         struct i915_vma *vma;
296         struct drm_i915_gem_object *pinned;
297         struct i915_sw_dma_fence_cb cb;
298         enum i915_cache_level cache_level;
299         unsigned int flags;
300 };
301
302 static int __vma_bind(struct dma_fence_work *work)
303 {
304         struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
305         struct i915_vma *vma = vw->vma;
306         int err;
307
308         err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags);
309         if (err)
310                 atomic_or(I915_VMA_ERROR, &vma->flags);
311
312         return err;
313 }
314
315 static void __vma_release(struct dma_fence_work *work)
316 {
317         struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
318
319         if (vw->pinned)
320                 __i915_gem_object_unpin_pages(vw->pinned);
321 }
322
323 static const struct dma_fence_work_ops bind_ops = {
324         .name = "bind",
325         .work = __vma_bind,
326         .release = __vma_release,
327 };
328
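/*
 * Allocate a dma-fence-work used to perform the PTE binding asynchronously.
 * The fence error is preset to -EAGAIN to keep the worker disabled until
 * i915_vma_bind() arms it by clearing the error.
 */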
329 struct i915_vma_work *i915_vma_work(void)
330 {
331         struct i915_vma_work *vw;
332
333         vw = kzalloc(sizeof(*vw), GFP_KERNEL);
334         if (!vw)
335                 return NULL;
336
337         dma_fence_work_init(&vw->base, &bind_ops);
338         vw->base.dma.error = -EAGAIN; /* disable the worker by default */
339
340         return vw;
341 }
342
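/* Wait for the vma's exclusive fence (i.e. a still-pending async bind) to signal. */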
343 int i915_vma_wait_for_bind(struct i915_vma *vma)
344 {
345         int err = 0;
346
347         if (rcu_access_pointer(vma->active.excl.fence)) {
348                 struct dma_fence *fence;
349
350                 rcu_read_lock();
351                 fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
352                 rcu_read_unlock();
353                 if (fence) {
354                         err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
355                         dma_fence_put(fence);
356                 }
357         }
358
359         return err;
360 }
361
362 /**
363  * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
364  * @vma: VMA to map
365  * @cache_level: mapping cache level
366  * @flags: flags like global or local mapping
367  * @work: preallocated worker for allocating and binding the PTE
368  *
369  * DMA addresses are taken from the scatter-gather table of this object (or of
370  * this VMA in case of non-default GGTT views) and PTE entries set up.
371  * Note that DMA addresses are also the only part of the SG table we care about.
372  */
373 int i915_vma_bind(struct i915_vma *vma,
374                   enum i915_cache_level cache_level,
375                   u32 flags,
376                   struct i915_vma_work *work)
377 {
378         u32 bind_flags;
379         u32 vma_flags;
380         int ret;
381
382         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
383         GEM_BUG_ON(vma->size > vma->node.size);
384
385         if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
386                                               vma->node.size,
387                                               vma->vm->total)))
388                 return -ENODEV;
389
390         if (GEM_DEBUG_WARN_ON(!flags))
391                 return -EINVAL;
392
393         bind_flags = flags;
394         bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
395
396         vma_flags = atomic_read(&vma->flags);
397         vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
398         if (flags & PIN_UPDATE)
399                 bind_flags |= vma_flags;
400         else
401                 bind_flags &= ~vma_flags;
402         if (bind_flags == 0)
403                 return 0;
404
405         GEM_BUG_ON(!vma->pages);
406
407         trace_i915_vma_bind(vma, bind_flags);
408         if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
409                 struct dma_fence *prev;
410
411                 work->vma = vma;
412                 work->cache_level = cache_level;
413                 work->flags = bind_flags | I915_VMA_ALLOC;
414
415                 /*
416                  * Note we only want to chain up to the migration fence on
418                  * the pages (not the object itself). As we don't track that yet,
419                  * we have to use the exclusive fence instead.
419                  *
420                  * Also note that we do not want to track the async vma as
421                  * part of the obj->resv->excl_fence as it only affects
422                  * execution and not the content or the object's backing store lifetime.
423                  */
424                 prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
425                 if (prev) {
426                         __i915_sw_fence_await_dma_fence(&work->base.chain,
427                                                         prev,
428                                                         &work->cb);
429                         dma_fence_put(prev);
430                 }
431
432                 work->base.dma.error = 0; /* enable the queue_work() */
433
434                 if (vma->obj) {
435                         __i915_gem_object_pin_pages(vma->obj);
436                         work->pinned = vma->obj;
437                 }
438         } else {
439                 ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
440                 if (ret)
441                         return ret;
442         }
443
444         atomic_or(bind_flags, &vma->flags);
445         return 0;
446 }
447
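/*
 * Map the GGTT vma through the mappable aperture for CPU access. On success
 * the vma and its fence are pinned; the caller must balance with
 * i915_vma_unpin_iomap(). Only valid for map-and-fenceable GGTT vmas.
 */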
448 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
449 {
450         void __iomem *ptr;
451         int err;
452
453         if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
454                 err = -ENODEV;
455                 goto err;
456         }
457
458         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
459         GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
460
461         ptr = READ_ONCE(vma->iomap);
462         if (ptr == NULL) {
463                 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
464                                         vma->node.start,
465                                         vma->node.size);
466                 if (ptr == NULL) {
467                         err = -ENOMEM;
468                         goto err;
469                 }
470
471                 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
472                         io_mapping_unmap(ptr);
473                         ptr = vma->iomap;
474                 }
475         }
476
477         __i915_vma_pin(vma);
478
479         err = i915_vma_pin_fence(vma);
480         if (err)
481                 goto err_unpin;
482
483         i915_vma_set_ggtt_write(vma);
484
485         /* NB Access through the GTT requires the device to be awake. */
486         return ptr;
487
488 err_unpin:
489         __i915_vma_unpin(vma);
490 err:
491         return IO_ERR_PTR(err);
492 }
493
494 void i915_vma_flush_writes(struct i915_vma *vma)
495 {
496         if (i915_vma_unset_ggtt_write(vma))
497                 intel_gt_flush_ggtt_writes(vma->vm->gt);
498 }
499
500 void i915_vma_unpin_iomap(struct i915_vma *vma)
501 {
502         GEM_BUG_ON(vma->iomap == NULL);
503
504         i915_vma_flush_writes(vma);
505
506         i915_vma_unpin_fence(vma);
507         i915_vma_unpin(vma);
508 }
509
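/*
 * Unpin and close the vma pointed to by *p_vma (clearing the pointer) and
 * drop the reference on its object. With I915_VMA_RELEASE_MAP, the object's
 * pinned map is released as well.
 */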
510 void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
511 {
512         struct i915_vma *vma;
513         struct drm_i915_gem_object *obj;
514
515         vma = fetch_and_zero(p_vma);
516         if (!vma)
517                 return;
518
519         obj = vma->obj;
520         GEM_BUG_ON(!obj);
521
522         i915_vma_unpin(vma);
523         i915_vma_close(vma);
524
525         if (flags & I915_VMA_RELEASE_MAP)
526                 i915_gem_object_unpin_map(obj);
527
528         i915_gem_object_put(obj);
529 }
530
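/*
 * Check whether the vma's current placement violates the requested size,
 * alignment or PIN_* placement constraints and so would have to be rebound.
 */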
531 bool i915_vma_misplaced(const struct i915_vma *vma,
532                         u64 size, u64 alignment, u64 flags)
533 {
534         if (!drm_mm_node_allocated(&vma->node))
535                 return false;
536
537         if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
538                 return true;
539
540         if (vma->node.size < size)
541                 return true;
542
543         GEM_BUG_ON(alignment && !is_power_of_2(alignment));
544         if (alignment && !IS_ALIGNED(vma->node.start, alignment))
545                 return true;
546
547         if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
548                 return true;
549
550         if (flags & PIN_OFFSET_BIAS &&
551             vma->node.start < (flags & PIN_OFFSET_MASK))
552                 return true;
553
554         if (flags & PIN_OFFSET_FIXED &&
555             vma->node.start != (flags & PIN_OFFSET_MASK))
556                 return true;
557
558         return false;
559 }
560
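/*
 * Recompute whether this GGTT vma lies within the mappable aperture and is
 * large and aligned enough to back a fence register, updating the
 * I915_VMA_CAN_FENCE flag accordingly.
 */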
561 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
562 {
563         bool mappable, fenceable;
564
565         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
566         GEM_BUG_ON(!vma->fence_size);
567
568         fenceable = (vma->node.size >= vma->fence_size &&
569                      IS_ALIGNED(vma->node.start, vma->fence_alignment));
570
571         mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
572
573         if (mappable && fenceable)
574                 set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
575         else
576                 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
577 }
578
579 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
580 {
581         struct drm_mm_node *node = &vma->node;
582         struct drm_mm_node *other;
583
584         /*
585          * On some machines we have to be careful when putting differing types
586          * of snoopable memory together to avoid the prefetcher crossing memory
587          * domains and dying. During vm initialisation, we decide whether or not
588          * these constraints apply and set the drm_mm.color_adjust
589          * appropriately.
590          */
591         if (!i915_vm_has_cache_coloring(vma->vm))
592                 return true;
593
594         /* Only valid to be called on an already inserted vma */
595         GEM_BUG_ON(!drm_mm_node_allocated(node));
596         GEM_BUG_ON(list_empty(&node->node_list));
597
598         other = list_prev_entry(node, node_list);
599         if (i915_node_color_differs(other, color) &&
600             !drm_mm_hole_follows(other))
601                 return false;
602
603         other = list_next_entry(node, node_list);
604         if (i915_node_color_differs(other, color) &&
605             !drm_mm_hole_follows(node))
606                 return false;
607
608         return true;
609 }
610
611 static void assert_bind_count(const struct drm_i915_gem_object *obj)
612 {
613         /*
614          * Combine the assertion that the object is bound and that we have
615          * pinned its pages. But we should never have bound the object
616          * more than we have pinned its pages. (For complete accuracy, we
617          * assume that no one else is pinning the pages, but as a rough assertion
618          * that we will not run into problems later, this will do!)
619          */
620         GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < atomic_read(&obj->bind_count));
621 }
622
623 /**
624  * i915_vma_insert - finds a slot for the vma in its address space
625  * @vma: the vma
626  * @size: requested size in bytes (can be larger than the VMA)
627  * @alignment: required alignment
628  * @flags: mask of PIN_* flags to use
629  *
630  * First we try to allocate some free space that meets the requirements for
631  * the VMA. Failing that, if the flags permit, it will evict an old VMA,
632  * preferably the oldest idle entry to make room for the new VMA.
633  *
634  * Returns:
635  * 0 on success, negative error code otherwise.
636  */
637 static int
638 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
639 {
640         unsigned long color;
641         u64 start, end;
642         int ret;
643
644         GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
645         GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
646
647         size = max(size, vma->size);
648         alignment = max(alignment, vma->display_alignment);
649         if (flags & PIN_MAPPABLE) {
650                 size = max_t(typeof(size), size, vma->fence_size);
651                 alignment = max_t(typeof(alignment),
652                                   alignment, vma->fence_alignment);
653         }
654
655         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
656         GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
657         GEM_BUG_ON(!is_power_of_2(alignment));
658
659         start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
660         GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
661
662         end = vma->vm->total;
663         if (flags & PIN_MAPPABLE)
664                 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
665         if (flags & PIN_ZONE_4G)
666                 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
667         GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
668
669         /* If binding the object/GGTT view requires more space than the entire
670          * aperture has, reject it early before evicting everything in a vain
671          * attempt to find space.
672          */
673         if (size > end) {
674                 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
675                           size, flags & PIN_MAPPABLE ? "mappable" : "total",
676                           end);
677                 return -ENOSPC;
678         }
679
680         color = 0;
681         if (vma->obj && i915_vm_has_cache_coloring(vma->vm))
682                 color = vma->obj->cache_level;
683
684         if (flags & PIN_OFFSET_FIXED) {
685                 u64 offset = flags & PIN_OFFSET_MASK;
686                 if (!IS_ALIGNED(offset, alignment) ||
687                     range_overflows(offset, size, end))
688                         return -EINVAL;
689
690                 ret = i915_gem_gtt_reserve(vma->vm, &vma->node,
691                                            size, offset, color,
692                                            flags);
693                 if (ret)
694                         return ret;
695         } else {
696                 /*
697                  * We only support huge gtt pages through the 48b PPGTT,
698                  * however we also don't want to force any alignment for
699                  * objects which need to be tightly packed into the low 32bits.
700                  *
701                  * Note that we assume that GGTT are limited to 4GiB for the
702                  * forseeable future. See also i915_ggtt_offset().
703                  */
704                 if (upper_32_bits(end - 1) &&
705                     vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
706                         /*
707                          * We can't mix 64K and 4K PTEs in the same page-table
708                          * (2M block), and so to avoid the ugliness and
709                          * complexity of coloring we opt for just aligning 64K
710                          * objects to 2M.
711                          */
712                         u64 page_alignment =
713                                 rounddown_pow_of_two(vma->page_sizes.sg |
714                                                      I915_GTT_PAGE_SIZE_2M);
715
716                         /*
717                          * Check we don't expand for the limited Global GTT
718                          * (mappable aperture is even more precious!). This
719                          * also checks that we exclude the aliasing-ppgtt.
720                          */
721                         GEM_BUG_ON(i915_vma_is_ggtt(vma));
722
723                         alignment = max(alignment, page_alignment);
724
725                         if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
726                                 size = round_up(size, I915_GTT_PAGE_SIZE_2M);
727                 }
728
729                 ret = i915_gem_gtt_insert(vma->vm, &vma->node,
730                                           size, alignment, color,
731                                           start, end, flags);
732                 if (ret)
733                         return ret;
734
735                 GEM_BUG_ON(vma->node.start < start);
736                 GEM_BUG_ON(vma->node.start + vma->node.size > end);
737         }
738         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
739         GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
740
741         if (vma->obj) {
742                 struct drm_i915_gem_object *obj = vma->obj;
743
744                 atomic_inc(&obj->bind_count);
745                 assert_bind_count(obj);
746         }
747         list_add_tail(&vma->vm_link, &vma->vm->bound_list);
748
749         return 0;
750 }
751
752 static void
753 i915_vma_detach(struct i915_vma *vma)
754 {
755         GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
756         GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
757
758         /*
759          * And finally now the object is completely decoupled from this
760          * vma, we can drop its hold on the backing storage and allow
761          * it to be reaped by the shrinker.
762          */
763         list_del(&vma->vm_link);
764         if (vma->obj) {
765                 struct drm_i915_gem_object *obj = vma->obj;
766
767                 assert_bind_count(obj);
768                 atomic_dec(&obj->bind_count);
769         }
770 }
771
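/*
 * Try to grab a pin without rebinding: atomically bump the pin count, but
 * only if the vma is already bound with all of the requested flags and is
 * not marked in error. If the pin count is currently zero, retry under
 * vm->mutex to avoid racing a concurrent i915_vma_unbind().
 */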
772 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
773 {
774         unsigned int bound;
775         bool pinned = true;
776
777         bound = atomic_read(&vma->flags);
778         do {
779                 if (unlikely(flags & ~bound))
780                         return false;
781
782                 if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
783                         return false;
784
785                 if (!(bound & I915_VMA_PIN_MASK))
786                         goto unpinned;
787
788                 GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
789         } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
790
791         return true;
792
793 unpinned:
794         /*
795          * If pin_count==0, but we are bound, check under the lock to avoid
796          * racing with a concurrent i915_vma_unbind().
797          */
798         mutex_lock(&vma->vm->mutex);
799         do {
800                 if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) {
801                         pinned = false;
802                         break;
803                 }
804
805                 if (unlikely(flags & ~bound)) {
806                         pinned = false;
807                         break;
808                 }
809         } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
810         mutex_unlock(&vma->vm->mutex);
811
812         return pinned;
813 }
814
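/*
 * Acquire a reference on the vma's backing pages, pinning the object's pages
 * and calling ops->set_pages() the first time around. The low bits of
 * pages_count track these references; the upper bits count bindings.
 */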
815 static int vma_get_pages(struct i915_vma *vma)
816 {
817         int err = 0;
818
819         if (atomic_add_unless(&vma->pages_count, 1, 0))
820                 return 0;
821
822         /* Allocations ahoy! */
823         if (mutex_lock_interruptible(&vma->pages_mutex))
824                 return -EINTR;
825
826         if (!atomic_read(&vma->pages_count)) {
827                 if (vma->obj) {
828                         err = i915_gem_object_pin_pages(vma->obj);
829                         if (err)
830                                 goto unlock;
831                 }
832
833                 err = vma->ops->set_pages(vma);
834                 if (err) {
835                         if (vma->obj)
836                                 i915_gem_object_unpin_pages(vma->obj);
837                         goto unlock;
838                 }
839         }
840         atomic_inc(&vma->pages_count);
841
842 unlock:
843         mutex_unlock(&vma->pages_mutex);
844
845         return err;
846 }
847
848 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
849 {
850         /* We allocate under vma_get_pages, so beware the shrinker */
851         mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING);
852         GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
853         if (atomic_sub_return(count, &vma->pages_count) == 0) {
854                 vma->ops->clear_pages(vma);
855                 GEM_BUG_ON(vma->pages);
856                 if (vma->obj)
857                         i915_gem_object_unpin_pages(vma->obj);
858         }
859         mutex_unlock(&vma->pages_mutex);
860 }
861
862 static void vma_put_pages(struct i915_vma *vma)
863 {
864         if (atomic_add_unless(&vma->pages_count, -1, 1))
865                 return;
866
867         __vma_put_pages(vma, 1);
868 }
869
870 static void vma_unbind_pages(struct i915_vma *vma)
871 {
872         unsigned int count;
873
874         lockdep_assert_held(&vma->vm->mutex);
875
876         /* The upper portion of pages_count is the number of bindings */
877         count = atomic_read(&vma->pages_count);
878         count >>= I915_VMA_PAGES_BIAS;
879         GEM_BUG_ON(!count);
880
881         __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
882 }
883
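/**
 * i915_vma_pin - reserve, bind and pin a vma in its address space
 * @vma: VMA to pin
 * @size: minimum size for the node, in bytes (0 to use the vma's size)
 * @alignment: required alignment of the node, in bytes
 * @flags: mask of PIN_* placement constraints and bind flags
 *
 * Tries a quick pin of an already-bound vma first; otherwise acquires the
 * backing pages, reserves a node in the address space if necessary, writes
 * the PTEs (asynchronously when the vm supports it) and pins the vma.
 *
 * Returns 0 on success, a negative error code on failure.
 */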
884 int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
885 {
886         struct i915_vma_work *work = NULL;
887         intel_wakeref_t wakeref = 0;
888         unsigned int bound;
889         int err;
890
891         BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
892         BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
893
894         GEM_BUG_ON(flags & PIN_UPDATE);
895         GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
896
897         /* First try and grab the pin without rebinding the vma */
898         if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK))
899                 return 0;
900
901         err = vma_get_pages(vma);
902         if (err)
903                 return err;
904
905         if (flags & vma->vm->bind_async_flags) {
906                 work = i915_vma_work();
907                 if (!work) {
908                         err = -ENOMEM;
909                         goto err_pages;
910                 }
911         }
912
913         if (flags & PIN_GLOBAL)
914                 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
915
916         /* No more allocations allowed once we hold vm->mutex */
917         err = mutex_lock_interruptible(&vma->vm->mutex);
918         if (err)
919                 goto err_fence;
920
921         if (unlikely(i915_vma_is_closed(vma))) {
922                 err = -ENOENT;
923                 goto err_unlock;
924         }
925
926         bound = atomic_read(&vma->flags);
927         if (unlikely(bound & I915_VMA_ERROR)) {
928                 err = -ENOMEM;
929                 goto err_unlock;
930         }
931
932         if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
933                 err = -EAGAIN; /* pins are meant to be fairly temporary */
934                 goto err_unlock;
935         }
936
937         if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
938                 __i915_vma_pin(vma);
939                 goto err_unlock;
940         }
941
942         err = i915_active_acquire(&vma->active);
943         if (err)
944                 goto err_unlock;
945
946         if (!(bound & I915_VMA_BIND_MASK)) {
947                 err = i915_vma_insert(vma, size, alignment, flags);
948                 if (err)
949                         goto err_active;
950
951                 if (i915_is_ggtt(vma->vm))
952                         __i915_vma_set_map_and_fenceable(vma);
953         }
954
955         GEM_BUG_ON(!vma->pages);
956         err = i915_vma_bind(vma,
957                             vma->obj ? vma->obj->cache_level : 0,
958                             flags, work);
959         if (err)
960                 goto err_remove;
961
962         /* There should only be at most 2 active bindings (user, global) */
963         GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
964         atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
965         list_move_tail(&vma->vm_link, &vma->vm->bound_list);
966
967         __i915_vma_pin(vma);
968         GEM_BUG_ON(!i915_vma_is_pinned(vma));
969         GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
970         GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
971
972 err_remove:
973         if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
974                 i915_vma_detach(vma);
975                 drm_mm_remove_node(&vma->node);
976         }
977 err_active:
978         i915_active_release(&vma->active);
979 err_unlock:
980         mutex_unlock(&vma->vm->mutex);
981 err_fence:
982         if (work)
983                 dma_fence_work_commit(&work->base);
984         if (wakeref)
985                 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
986 err_pages:
987         vma_put_pages(vma);
988         return err;
989 }
990
991 static void flush_idle_contexts(struct intel_gt *gt)
992 {
993         struct intel_engine_cs *engine;
994         enum intel_engine_id id;
995
996         for_each_engine(engine, gt, id)
997                 intel_engine_flush_barriers(engine);
998
999         intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
1000 }
1001
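/*
 * Pin a vma into the global GTT, retrying on -ENOSPC: idle barriers are
 * flushed, the GT is waited upon and the address space is evicted before
 * each new attempt, so this only returns once the pin succeeds or fails
 * with a different error.
 */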
1002 int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
1003 {
1004         struct i915_address_space *vm = vma->vm;
1005         int err;
1006
1007         GEM_BUG_ON(!i915_vma_is_ggtt(vma));
1008
1009         do {
1010                 err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
1011                 if (err != -ENOSPC) {
1012                         if (!err) {
1013                                 err = i915_vma_wait_for_bind(vma);
1014                                 if (err)
1015                                         i915_vma_unpin(vma);
1016                         }
1017                         return err;
1018                 }
1019
1020                 /* Unlike i915_vma_pin, we don't take no for an answer! */
1021                 flush_idle_contexts(vm->gt);
1022                 if (mutex_lock_interruptible(&vm->mutex) == 0) {
1023                         i915_gem_evict_vm(vm);
1024                         mutex_unlock(&vm->mutex);
1025                 }
1026         } while (1);
1027 }
1028
1029 void i915_vma_close(struct i915_vma *vma)
1030 {
1031         struct intel_gt *gt = vma->vm->gt;
1032         unsigned long flags;
1033
1034         GEM_BUG_ON(i915_vma_is_closed(vma));
1035
1036         /*
1037          * We defer actually closing, unbinding and destroying the VMA until
1038          * the next idle point, or if the object is freed in the meantime. By
1039          * postponing the unbind, we allow for it to be resurrected by the
1040          * client, avoiding the work required to rebind the VMA. This is
1041          * advantageous for DRI, where the client/server pass objects
1042          * between themselves, temporarily opening a local VMA to the
1043          * object, and then closing it again. The same object is then reused
1044          * on the next frame (or two, depending on the depth of the swap queue)
1045          * causing us to rebind the VMA once more. This ends up being a lot
1046          * of wasted work for the steady state.
1047          */
1048         spin_lock_irqsave(&gt->closed_lock, flags);
1049         list_add(&vma->closed_link, &gt->closed_vma);
1050         spin_unlock_irqrestore(&gt->closed_lock, flags);
1051 }
1052
1053 static void __i915_vma_remove_closed(struct i915_vma *vma)
1054 {
1055         struct intel_gt *gt = vma->vm->gt;
1056
1057         spin_lock_irq(&gt->closed_lock);
1058         list_del_init(&vma->closed_link);
1059         spin_unlock_irq(&gt->closed_lock);
1060 }
1061
1062 void i915_vma_reopen(struct i915_vma *vma)
1063 {
1064         if (i915_vma_is_closed(vma))
1065                 __i915_vma_remove_closed(vma);
1066 }
1067
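/*
 * Final kref release for the vma: forcibly unbind it if it still owns a
 * node, unlink it from its object and from the closed list, then free it.
 */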
1068 void i915_vma_release(struct kref *ref)
1069 {
1070         struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
1071
1072         if (drm_mm_node_allocated(&vma->node)) {
1073                 mutex_lock(&vma->vm->mutex);
1074                 atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1075                 WARN_ON(__i915_vma_unbind(vma));
1076                 mutex_unlock(&vma->vm->mutex);
1077                 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1078         }
1079         GEM_BUG_ON(i915_vma_is_active(vma));
1080
1081         if (vma->obj) {
1082                 struct drm_i915_gem_object *obj = vma->obj;
1083
1084                 spin_lock(&obj->vma.lock);
1085                 list_del(&vma->obj_link);
1086                 rb_erase(&vma->obj_node, &obj->vma.tree);
1087                 spin_unlock(&obj->vma.lock);
1088         }
1089
1090         __i915_vma_remove_closed(vma);
1091         i915_vm_put(vma->vm);
1092
1093         i915_active_fini(&vma->active);
1094         i915_vma_free(vma);
1095 }
1096
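/*
 * With the GT idle, destroy the vmas left on the closed list, taking
 * temporary references on each vma's object and address space so that
 * neither disappears while the closed_lock is dropped.
 */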
1097 void i915_vma_parked(struct intel_gt *gt)
1098 {
1099         struct i915_vma *vma, *next;
1100         LIST_HEAD(closed);
1101
1102         spin_lock_irq(&gt->closed_lock);
1103         list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1104                 struct drm_i915_gem_object *obj = vma->obj;
1105                 struct i915_address_space *vm = vma->vm;
1106
1107                 /* XXX All to avoid keeping a reference on i915_vma itself */
1108
1109                 if (!kref_get_unless_zero(&obj->base.refcount))
1110                         continue;
1111
1112                 if (!i915_vm_tryopen(vm)) {
1113                         i915_gem_object_put(obj);
1114                         continue;
1115                 }
1116
1117                 list_move(&vma->closed_link, &closed);
1118         }
1119         spin_unlock_irq(&gt->closed_lock);
1120
1121         /* As the GT is held idle, no vma can be reopened as we destroy them */
1122         list_for_each_entry_safe(vma, next, &closed, closed_link) {
1123                 struct drm_i915_gem_object *obj = vma->obj;
1124                 struct i915_address_space *vm = vma->vm;
1125
1126                 INIT_LIST_HEAD(&vma->closed_link);
1127                 __i915_vma_put(vma);
1128
1129                 i915_gem_object_put(obj);
1130                 i915_vm_close(vm);
1131         }
1132 }
1133
1134 static void __i915_vma_iounmap(struct i915_vma *vma)
1135 {
1136         GEM_BUG_ON(i915_vma_is_pinned(vma));
1137
1138         if (vma->iomap == NULL)
1139                 return;
1140
1141         io_mapping_unmap(vma->iomap);
1142         vma->iomap = NULL;
1143 }
1144
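/*
 * Zap any userspace CPU PTEs pointing at this GGTT vma's mmap so that the
 * next access faults back in through the driver, and drop the object's
 * userfault tracking if this was its last faulted vma.
 */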
1145 void i915_vma_revoke_mmap(struct i915_vma *vma)
1146 {
1147         struct drm_vma_offset_node *node;
1148         u64 vma_offset;
1149
1150         if (!i915_vma_has_userfault(vma))
1151                 return;
1152
1153         GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1154         GEM_BUG_ON(!vma->obj->userfault_count);
1155
1156         node = &vma->mmo->vma_node;
1157         vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
1158         unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1159                             drm_vma_node_offset_addr(node) + vma_offset,
1160                             vma->size,
1161                             1);
1162
1163         i915_vma_unset_userfault(vma);
1164         if (!--vma->obj->userfault_count)
1165                 list_del(&vma->obj->userfault_link);
1166 }
1167
1168 int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1169 {
1170         int err;
1171
1172         GEM_BUG_ON(!i915_vma_is_pinned(vma));
1173
1174         /* Wait for the vma to be bound before we start! */
1175         err = i915_request_await_active(rq, &vma->active, 0);
1176         if (err)
1177                 return err;
1178
1179         return i915_active_add_request(&vma->active, rq);
1180 }
1181
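/*
 * Track the request in the vma's active tracker and in the object's
 * reservation object: as an exclusive fence (with frontbuffer invalidation
 * and a render write domain) for EXEC_OBJECT_WRITE, as a shared fence
 * otherwise.
 */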
1182 int i915_vma_move_to_active(struct i915_vma *vma,
1183                             struct i915_request *rq,
1184                             unsigned int flags)
1185 {
1186         struct drm_i915_gem_object *obj = vma->obj;
1187         int err;
1188
1189         assert_object_held(obj);
1190
1191         err = __i915_vma_move_to_active(vma, rq);
1192         if (unlikely(err))
1193                 return err;
1194
1195         if (flags & EXEC_OBJECT_WRITE) {
1196                 struct intel_frontbuffer *front;
1197
1198                 front = __intel_frontbuffer_get(obj);
1199                 if (unlikely(front)) {
1200                         if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
1201                                 i915_active_add_request(&front->write, rq);
1202                         intel_frontbuffer_put(front);
1203                 }
1204
1205                 dma_resv_add_excl_fence(vma->resv, &rq->fence);
1206                 obj->write_domain = I915_GEM_DOMAIN_RENDER;
1207                 obj->read_domains = 0;
1208         } else {
1209                 err = dma_resv_reserve_shared(vma->resv, 1);
1210                 if (unlikely(err))
1211                         return err;
1212
1213                 dma_resv_add_shared_fence(vma->resv, &rq->fence);
1214                 obj->write_domain = 0;
1215         }
1216         obj->read_domains |= I915_GEM_GPU_DOMAINS;
1217         obj->mm.dirty = true;
1218
1219         GEM_BUG_ON(!i915_vma_is_active(vma));
1220         return 0;
1221 }
1222
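/*
 * Unbind the vma from its address space; the caller must hold vm->mutex.
 * Waits for outstanding activity, flushes GGTT writes and revokes the fence
 * and userspace mmaps for aperture vmas, clears the PTEs, and finally
 * releases the drm_mm node and the page references held by the bindings.
 */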
1223 int __i915_vma_unbind(struct i915_vma *vma)
1224 {
1225         int ret;
1226
1227         lockdep_assert_held(&vma->vm->mutex);
1228
1229         /*
1230          * First wait upon any activity as retiring the request may
1231          * have side-effects such as unpinning or even unbinding this vma.
1232          *
1233          * XXX Actually waiting under the vm->mutex is a hindrance and
1234          * should be pipelined wherever possible. In cases where that is
1235          * unavoidable, we should lift the wait to before the mutex.
1236          */
1237         ret = i915_vma_sync(vma);
1238         if (ret)
1239                 return ret;
1240
1241         if (i915_vma_is_pinned(vma)) {
1242                 vma_print_allocator(vma, "is pinned");
1243                 return -EAGAIN;
1244         }
1245
1246         /*
1247          * After confirming that no one else is pinning this vma, wait for
1248          * any laggards who may have crept in during the wait (through
1249          * a residual pin skipping the vm->mutex) to complete.
1250          */
1251         ret = i915_vma_sync(vma);
1252         if (ret)
1253                 return ret;
1254
1255         if (!drm_mm_node_allocated(&vma->node))
1256                 return 0;
1257
1258         GEM_BUG_ON(i915_vma_is_pinned(vma));
1259         GEM_BUG_ON(i915_vma_is_active(vma));
1260
1261         if (i915_vma_is_map_and_fenceable(vma)) {
1262                 /*
1263                  * Check that we have flushed all writes through the GGTT
1264                  * before the unbind; otherwise, due to the non-strict nature of those
1265                  * indirect writes, they may end up referencing the GGTT PTE
1266                  * after the unbind.
1267                  *
1268                  * Note that we may be concurrently poking at the GGTT_WRITE
1269                  * bit from set-domain, as we mark all GGTT vma associated
1270                  * with an object. We know this is for another vma, as we
1271                  * are currently unbinding this one -- so if this vma will be
1272                  * reused, it will be refaulted and have its dirty bit set
1273                  * before the next write.
1274                  */
1275                 i915_vma_flush_writes(vma);
1276
1277                 /* release the fence reg _after_ flushing */
1278                 ret = i915_vma_revoke_fence(vma);
1279                 if (ret)
1280                         return ret;
1281
1282                 /* Force a pagefault for domain tracking on next user access */
1283                 i915_vma_revoke_mmap(vma);
1284
1285                 __i915_vma_iounmap(vma);
1286                 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
1287         }
1288         GEM_BUG_ON(vma->fence);
1289         GEM_BUG_ON(i915_vma_has_userfault(vma));
1290
1291         if (likely(atomic_read(&vma->vm->open))) {
1292                 trace_i915_vma_unbind(vma);
1293                 vma->ops->unbind_vma(vma);
1294         }
1295         atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
1296                    &vma->flags);
1297
1298         i915_vma_detach(vma);
1299         vma_unbind_pages(vma);
1300
1301         drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
1302         return 0;
1303 }
1304
1305 int i915_vma_unbind(struct i915_vma *vma)
1306 {
1307         struct i915_address_space *vm = vma->vm;
1308         intel_wakeref_t wakeref = 0;
1309         int err;
1310
1311         if (!drm_mm_node_allocated(&vma->node))
1312                 return 0;
1313
1314         if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
1315                 /* XXX not always required: nop_clear_range */
1316                 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
1317
1318         /* Optimistic wait before taking the mutex */
1319         err = i915_vma_sync(vma);
1320         if (err)
1321                 goto out_rpm;
1322
1323         err = mutex_lock_interruptible(&vm->mutex);
1324         if (err)
1325                 goto out_rpm;
1326
1327         err = __i915_vma_unbind(vma);
1328         mutex_unlock(&vm->mutex);
1329
1330 out_rpm:
1331         if (wakeref)
1332                 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
1333         return err;
1334 }
1335
1336 struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
1337 {
1338         i915_gem_object_make_unshrinkable(vma->obj);
1339         return vma;
1340 }
1341
1342 void i915_vma_make_shrinkable(struct i915_vma *vma)
1343 {
1344         i915_gem_object_make_shrinkable(vma->obj);
1345 }
1346
1347 void i915_vma_make_purgeable(struct i915_vma *vma)
1348 {
1349         i915_gem_object_make_purgeable(vma->obj);
1350 }
1351
1352 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1353 #include "selftests/i915_vma.c"
1354 #endif
1355
1356 static void i915_global_vma_shrink(void)
1357 {
1358         kmem_cache_shrink(global.slab_vmas);
1359 }
1360
1361 static void i915_global_vma_exit(void)
1362 {
1363         kmem_cache_destroy(global.slab_vmas);
1364 }
1365
1366 static struct i915_global_vma global = { {
1367         .shrink = i915_global_vma_shrink,
1368         .exit = i915_global_vma_exit,
1369 } };
1370
1371 int __init i915_global_vma_init(void)
1372 {
1373         global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
1374         if (!global.slab_vmas)
1375                 return -ENOMEM;
1376
1377         i915_global_register(&global.base);
1378         return 0;
1379 }