/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/sched/mm.h>

#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
#include "i915_gem_mman.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
#include "i915_trace.h"

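/*
 * All GEM objects are allocated from a single slab cache, wrapped in an
 * i915_global so that the cache can be shrunk under memory pressure and
 * destroyed again through the shrink/exit callbacks registered below.
 */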
static struct i915_global_object {
        struct i915_global base;
        struct kmem_cache *slab_objects;
} global;

struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
        return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
        return kmem_cache_free(global.slab_objects, obj);
}

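/*
 * A sketch of the usual backend pattern (not lifted verbatim from any one
 * caller): the backend allocates the wrapper with i915_gem_object_alloc(),
 * initialises the embedded GEM object (e.g. with drm_gem_object_init() or
 * drm_gem_private_object_init()), and then calls i915_gem_object_init()
 * with its drm_i915_gem_object_ops and a static lock_class_key.
 */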
void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops,
                          struct lock_class_key *key)
{
        __mutex_init(&obj->mm.lock, "obj->mm.lock", key);

        spin_lock_init(&obj->vma.lock);
        INIT_LIST_HEAD(&obj->vma.list);

        INIT_LIST_HEAD(&obj->mm.link);

        INIT_LIST_HEAD(&obj->lut_list);

        spin_lock_init(&obj->mmo.lock);
        obj->mmo.offsets = RB_ROOT;

        init_rcu_head(&obj->rcu);

        obj->ops = ops;

        obj->mm.madv = I915_MADV_WILLNEED;
        INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&obj->mm.get_page.lock);
}

/**
 * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels
 * for a given cache_level
 * @obj: #drm_i915_gem_object
 * @cache_level: cache level
 */
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
                                         unsigned int cache_level)
{
        obj->cache_level = cache_level;

        if (cache_level != I915_CACHE_NONE)
                obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ |
                                       I915_BO_CACHE_COHERENT_FOR_WRITE);
        else if (HAS_LLC(to_i915(obj->base.dev)))
                obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ;
        else
                obj->cache_coherent = 0;

        obj->cache_dirty =
                !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}

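/*
 * i915_gem_close_object() runs when a GEM handle on this object is closed
 * for @file: it detaches the file's lookup entries (luts) from the object,
 * revokes the file's mmap offsets, and drops the corresponding vma
 * open-counts, closing a non-GGTT vma when its last open-count is released.
 */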
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem);
        struct drm_i915_file_private *fpriv = file->driver_priv;
        struct i915_mmap_offset *mmo, *mn;
        struct i915_lut_handle *lut, *ln;
        LIST_HEAD(close);

        i915_gem_object_lock(obj);
        list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
                struct i915_gem_context *ctx = lut->ctx;

                if (ctx->file_priv != fpriv)
                        continue;

                i915_gem_context_get(ctx);
                list_move(&lut->obj_link, &close);
        }
        i915_gem_object_unlock(obj);

        spin_lock(&obj->mmo.lock);
        rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
                drm_vma_node_revoke(&mmo->vma_node, file);
        spin_unlock(&obj->mmo.lock);

        list_for_each_entry_safe(lut, ln, &close, obj_link) {
                struct i915_gem_context *ctx = lut->ctx;
                struct i915_vma *vma;

                /*
                 * We allow the process to have multiple handles to the same
                 * vma, in the same fd namespace, by virtue of flink/open.
                 */

                mutex_lock(&ctx->mutex);
                vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
                if (vma) {
                        GEM_BUG_ON(vma->obj != obj);
                        GEM_BUG_ON(!atomic_read(&vma->open_count));
                        if (atomic_dec_and_test(&vma->open_count) &&
                            !i915_vma_is_ggtt(vma))
                                i915_vma_close(vma);
                }
                mutex_unlock(&ctx->mutex);

                i915_gem_context_put(lut->ctx);
                i915_lut_handle_free(lut);
                i915_gem_object_put(obj);
        }
}

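/*
 * Final step of the deferred-free path: once the RCU grace period has
 * elapsed, release the reservation object and return the wrapper to the
 * slab, dropping the free_count that i915_gem_free_object() took.
 */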
static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
        struct drm_i915_gem_object *obj =
                container_of(head, typeof(*obj), rcu);
        struct drm_i915_private *i915 = to_i915(obj->base.dev);

        dma_resv_fini(&obj->base._resv);
        i915_gem_object_free(obj);

        GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
        atomic_dec(&i915->mm.free_count);
}

static void __i915_gem_free_objects(struct drm_i915_private *i915,
                                    struct llist_node *freed)
{
        struct drm_i915_gem_object *obj, *on;
        intel_wakeref_t wakeref;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
        llist_for_each_entry_safe(obj, on, freed, freed) {
                struct i915_mmap_offset *mmo, *mn;

                trace_i915_gem_object_destroy(obj);

                if (!list_empty(&obj->vma.list)) {
                        struct i915_vma *vma;

                        /*
                         * Note that the vma keeps an object reference while
                         * it is active, so it *should* not sleep while we
                         * destroy it. Our debug code errs and insists it
                         * *might*. For the moment, play along.
                         */
                        spin_lock(&obj->vma.lock);
                        while ((vma = list_first_entry_or_null(&obj->vma.list,
                                                               struct i915_vma,
                                                               obj_link))) {
                                GEM_BUG_ON(vma->obj != obj);
                                spin_unlock(&obj->vma.lock);

                                __i915_vma_put(vma);

                                spin_lock(&obj->vma.lock);
                        }
                        spin_unlock(&obj->vma.lock);
                }

                i915_gem_object_release_mmap(obj);

                rbtree_postorder_for_each_entry_safe(mmo, mn,
                                                     &obj->mmo.offsets,
                                                     offset) {
                        drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
                                              &mmo->vma_node);
                        kfree(mmo);
                }
                obj->mmo.offsets = RB_ROOT;

                GEM_BUG_ON(atomic_read(&obj->bind_count));
                GEM_BUG_ON(obj->userfault_count);
                GEM_BUG_ON(!list_empty(&obj->lut_list));

                atomic_set(&obj->mm.pages_pin_count, 0);
                __i915_gem_object_put_pages(obj);
                GEM_BUG_ON(i915_gem_object_has_pages(obj));
                bitmap_free(obj->bit_17);

                if (obj->base.import_attach)
                        drm_prime_gem_destroy(&obj->base, NULL);

                drm_gem_free_mmap_offset(&obj->base);

                if (obj->ops->release)
                        obj->ops->release(obj);

                /* But keep the pointer alive for RCU-protected lookups */
                call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
                cond_resched();
        }
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
        struct llist_node *freed = llist_del_all(&i915->mm.free_list);

        if (unlikely(freed))
                __i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_work(struct work_struct *work)
{
        struct drm_i915_private *i915 =
                container_of(work, struct drm_i915_private, mm.free_work);

        i915_gem_flush_free_objects(i915);
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_i915_private *i915 = to_i915(obj->base.dev);

        GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));

        /*
         * Before we free the object, make sure any pure RCU-only
         * read-side critical sections are complete, e.g.
         * i915_gem_busy_ioctl(). For the corresponding synchronized
         * lookup see i915_gem_object_lookup_rcu().
         */
        atomic_inc(&i915->mm.free_count);

        /*
         * This serializes freeing with the shrinker. Since the free
         * is delayed, first by RCU then by the workqueue, we want the
         * shrinker to be able to free pages of unreferenced objects,
         * or else we may oom whilst there are plenty of deferred
         * freed objects.
         */
        i915_gem_object_make_unshrinkable(obj);

        /*
         * Since we require blocking on struct_mutex to unbind the freed
         * object from the GPU before releasing resources back to the
         * system, we cannot do that directly from the RCU callback (which
         * may be a softirq context), but must instead defer that work onto
         * a workqueue. We use the RCU callback rather than move the freed
         * object directly onto the work queue so that we can mix between
         * using the worker and performing frees directly from subsequent
         * allocations for crude but effective memory throttling.
         */
        if (llist_add(&obj->freed, &i915->mm.free_list))
                queue_work(i915->wq, &i915->mm.free_work);
}

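/*
 * Render writes to a write-back cacheable object (neither I915_CACHE_NONE
 * nor I915_CACHE_WT) are not guaranteed to be coherent with memory, so
 * flush_write_domain() below marks such objects as cache_dirty and leaves
 * the eventual clflush to whoever next needs coherent data.
 */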
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        return !(obj->cache_level == I915_CACHE_NONE ||
                 obj->cache_level == I915_CACHE_WT);
}

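/*
 * Flush the object's pending write domain: GGTT writes are pushed out via
 * the GT's GGTT write barrier and a frontbuffer flush, WC writes with a
 * write memory barrier, CPU writes with a synchronous clflush, and dirty
 * render writes are simply noted in obj->cache_dirty for a later flush.
 */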
void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
                                   unsigned int flush_domains)
{
        struct i915_vma *vma;

        assert_object_held(obj);

        if (!(obj->write_domain & flush_domains))
                return;

        switch (obj->write_domain) {
        case I915_GEM_DOMAIN_GTT:
                spin_lock(&obj->vma.lock);
                for_each_ggtt_vma(vma, obj) {
                        if (i915_vma_unset_ggtt_write(vma))
                                intel_gt_flush_ggtt_writes(vma->vm->gt);
                }
                spin_unlock(&obj->vma.lock);

                i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
                break;

        case I915_GEM_DOMAIN_WC:
                wmb();
                break;

        case I915_GEM_DOMAIN_CPU:
                i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
                break;

        case I915_GEM_DOMAIN_RENDER:
                if (gpu_write_needs_clflush(obj))
                        obj->cache_dirty = true;
                break;
        }

        obj->write_domain = 0;
}

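/*
 * The two helpers below only act if the object currently has frontbuffer
 * tracking attached: they take a temporary reference on the
 * intel_frontbuffer and forward the flush/invalidate event to the display
 * side before dropping that reference again.
 */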
void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
                                         enum fb_op_origin origin)
{
        struct intel_frontbuffer *front;

        front = __intel_frontbuffer_get(obj);
        if (front) {
                intel_frontbuffer_flush(front, origin);
                intel_frontbuffer_put(front);
        }
}

void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
                                              enum fb_op_origin origin)
{
        struct intel_frontbuffer *front;

        front = __intel_frontbuffer_get(obj);
        if (front) {
                intel_frontbuffer_invalidate(front, origin);
                intel_frontbuffer_put(front);
        }
}

void i915_gem_init__objects(struct drm_i915_private *i915)
{
        INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

static void i915_global_objects_shrink(void)
{
        kmem_cache_shrink(global.slab_objects);
}

static void i915_global_objects_exit(void)
{
        kmem_cache_destroy(global.slab_objects);
}

static struct i915_global_object global = { {
        .shrink = i915_global_objects_shrink,
        .exit = i915_global_objects_exit,
} };

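/*
 * Create the object slab and register the global above (with its shrink
 * and exit callbacks) with the i915 globals infrastructure; presumably
 * invoked once during driver initialisation alongside the other
 * i915_global_*_init() routines.
 */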
int __init i915_global_objects_init(void)
{
        global.slab_objects =
                KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
        if (!global.slab_objects)
                return -ENOMEM;

        i915_global_register(&global.base);
        return 0;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif