drivers/gpu/drm/i915/gem/i915_gem_pages.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages,
                                 unsigned int sg_page_sizes)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        unsigned long supported = INTEL_INFO(i915)->page_sizes;
        int i;

        lockdep_assert_held(&obj->mm.lock);

        /* Make the pages coherent with the GPU (flushing any swapin). */
        if (obj->cache_dirty) {
                obj->write_domain = 0;
                if (i915_gem_object_has_struct_page(obj))
                        drm_clflush_sg(pages);
                obj->cache_dirty = false;
        }

        obj->mm.get_page.sg_pos = pages->sgl;
        obj->mm.get_page.sg_idx = 0;

        obj->mm.pages = pages;

        if (i915_gem_object_is_tiled(obj) &&
            i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                GEM_BUG_ON(obj->mm.quirked);
                __i915_gem_object_pin_pages(obj);
                obj->mm.quirked = true;
        }

        GEM_BUG_ON(!sg_page_sizes);
        obj->mm.page_sizes.phys = sg_page_sizes;

        /*
         * Calculate the supported page-sizes which fit into the given
         * sg_page_sizes. This will give us the page-sizes which we may be able
         * to use opportunistically when later inserting into the GTT. For
         * example if phys=2G, then in theory we should be able to use 1G, 2M,
         * 64K or 4K pages, although in practice this will depend on a number of
         * other factors.
         */
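        /*
         * Worked example (illustrative values only, not taken from real
         * hardware state): with supported = 4K | 64K | 2M and
         * phys = 2M | 4K, each supported bit i for which phys still has a
         * bit at or above i set is kept, so sg ends up as 4K | 64K | 2M.
         * If phys were only 4K, sg would collapse to just 4K.
         */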
        obj->mm.page_sizes.sg = 0;
        for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
                if (obj->mm.page_sizes.phys & ~0u << i)
                        obj->mm.page_sizes.sg |= BIT(i);
        }
        GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

        if (i915_gem_object_is_shrinkable(obj)) {
                struct list_head *list;
                unsigned long flags;

                spin_lock_irqsave(&i915->mm.obj_lock, flags);

                i915->mm.shrink_count++;
                i915->mm.shrink_memory += obj->base.size;

                if (obj->mm.madv != I915_MADV_WILLNEED)
                        list = &i915->mm.purge_list;
                else
                        list = &i915->mm.shrink_list;
                list_add_tail(&obj->mm.link, list);

                atomic_set(&obj->mm.shrink_pin, 0);
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }
}

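/*
 * Illustrative sketch (not part of the driver): a backing-store
 * implementation builds an sg_table in its ->get_pages() hook and hands it
 * over with __i915_gem_object_set_pages(). The helpers other than
 * __i915_gem_object_set_pages() are hypothetical names for this example.
 *
 *	static int example_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		struct sg_table *st;
 *		unsigned int sg_page_sizes;
 *
 *		st = example_alloc_and_fill_sgt(obj, &sg_page_sizes);
 *		if (IS_ERR(st))
 *			return PTR_ERR(st);
 *
 *		__i915_gem_object_set_pages(obj, st, sg_page_sizes);
 *		return 0;
 *	}
 */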
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
        int err;

        if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
                DRM_DEBUG("Attempting to obtain a purgeable object\n");
                return -EFAULT;
        }

        err = obj->ops->get_pages(obj);
        GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

        return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times, and the pages remain resident until every pin has been
 * dropped with i915_gem_object_unpin_pages() - once no longer pinned, the
 * pages may be released, either as a result of memory pressure (reaping
 * pages under the shrinker) or as the object itself is released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
        int err;

        err = mutex_lock_interruptible(&obj->mm.lock);
        if (err)
                return err;

        if (unlikely(!i915_gem_object_has_pages(obj))) {
                GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

                err = ____i915_gem_object_get_pages(obj);
                if (err)
                        goto unlock;

                smp_mb__before_atomic();
        }
        atomic_inc(&obj->mm.pages_pin_count);

unlock:
        mutex_unlock(&obj->mm.lock);
        return err;
}

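/*
 * Illustrative usage (a sketch, not a caller in this file): pin the pages
 * around a CPU access and balance the pin when done.
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	... safely dereference obj->mm.pages while pinned ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */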
/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
        drm_gem_free_mmap_offset(&obj->base);
        if (obj->ops->truncate)
                obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
        lockdep_assert_held(&obj->mm.lock);
        GEM_BUG_ON(i915_gem_object_has_pages(obj));

        if (obj->ops->writeback)
                obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        rcu_read_lock();
        radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
                radix_tree_delete(&obj->mm.get_page.radix, iter.index);
        rcu_read_unlock();
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;

        pages = fetch_and_zero(&obj->mm.pages);
        if (IS_ERR_OR_NULL(pages))
                return pages;

        i915_gem_object_make_unshrinkable(obj);

        if (obj->mm.mapping) {
                void *ptr;

                ptr = page_mask_bits(obj->mm.mapping);
                if (is_vmalloc_addr(ptr))
                        vunmap(ptr);
                else
                        kunmap(kmap_to_page(ptr));

                obj->mm.mapping = NULL;
        }

        __i915_gem_object_reset_page_iter(obj);
        obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

        return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
                                enum i915_mm_subclass subclass)
{
        struct sg_table *pages;
        int err;

        if (i915_gem_object_has_pinned_pages(obj))
                return -EBUSY;

        GEM_BUG_ON(atomic_read(&obj->bind_count));

        /* May be called by shrinker from within get_pages() (on another bo) */
        mutex_lock_nested(&obj->mm.lock, subclass);
        if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
                err = -EBUSY;
                goto unlock;
        }

        /*
         * ->put_pages might need to allocate memory for the bit17 swizzle
         * array, hence protect the pages from being reaped by removing them
         * from the gtt lists early.
         */
        pages = __i915_gem_object_unset_pages(obj);

        /*
         * XXX Temporary hijinx to avoid updating all backends to handle
         * NULL pages. In the future, when we have more asynchronous
         * get_pages backends we should be better able to handle the
         * cancellation of the async task in a more uniform manner.
         */
        if (!pages && !i915_gem_object_needs_async_cancel(obj))
                pages = ERR_PTR(-EINVAL);

        if (!IS_ERR(pages))
                obj->ops->put_pages(obj, pages);

        err = 0;
unlock:
        mutex_unlock(&obj->mm.lock);

        return err;
}

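/*
 * Illustrative sketch (not a verbatim caller): the shrinker drops the pages
 * of objects it reaps and, because it may be running from inside a
 * get_pages() on a different object, passes a distinct lock subclass for
 * obj->mm.lock. I915_MM_SHRINKER is assumed here to be that subclass.
 *
 *	if (__i915_gem_object_put_pages(obj, I915_MM_SHRINKER) == 0)
 *		freed += obj->base.size >> PAGE_SHIFT;
 */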
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
                                 enum i915_map_type type)
{
        unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
        struct sg_table *sgt = obj->mm.pages;
        struct sgt_iter sgt_iter;
        struct page *page;
        struct page *stack_pages[32];
        struct page **pages = stack_pages;
        unsigned long i = 0;
        pgprot_t pgprot;
        void *addr;

        /* A single page can always be kmapped */
        if (n_pages == 1 && type == I915_MAP_WB)
                return kmap(sg_page(sgt->sgl));

        if (n_pages > ARRAY_SIZE(stack_pages)) {
                /* Too big for stack -- allocate temporary array instead */
                pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
                if (!pages)
                        return NULL;
        }

        for_each_sgt_page(page, sgt_iter, sgt)
                pages[i++] = page;

        /* Check that we have the expected number of pages */
        GEM_BUG_ON(i != n_pages);

        switch (type) {
        default:
                MISSING_CASE(type);
                /* fallthrough - to use PAGE_KERNEL anyway */
        case I915_MAP_WB:
                pgprot = PAGE_KERNEL;
                break;
        case I915_MAP_WC:
                pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
                break;
        }
        addr = vmap(pages, n_pages, 0, pgprot);

        if (pages != stack_pages)
                kvfree(pages);

        return addr;
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                              enum i915_map_type type)
{
        enum i915_map_type has_type;
        bool pinned;
        void *ptr;
        int err;

        if (unlikely(!i915_gem_object_has_struct_page(obj)))
                return ERR_PTR(-ENXIO);

        err = mutex_lock_interruptible(&obj->mm.lock);
        if (err)
                return ERR_PTR(err);

        /*
         * Unless the caller passes I915_MAP_OVERRIDE, an existing mapping of
         * a different type that is already pinned by others cannot be
         * replaced, and we bail out with -EBUSY below.
         */
        pinned = !(type & I915_MAP_OVERRIDE);
        type &= ~I915_MAP_OVERRIDE;

        if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
                if (unlikely(!i915_gem_object_has_pages(obj))) {
                        GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

                        err = ____i915_gem_object_get_pages(obj);
                        if (err)
                                goto err_unlock;

                        smp_mb__before_atomic();
                }
                atomic_inc(&obj->mm.pages_pin_count);
                pinned = false;
        }
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));

        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (ptr && has_type != type) {
                if (pinned) {
                        err = -EBUSY;
                        goto err_unpin;
                }

                if (is_vmalloc_addr(ptr))
                        vunmap(ptr);
                else
                        kunmap(kmap_to_page(ptr));

                ptr = obj->mm.mapping = NULL;
        }

        if (!ptr) {
                ptr = i915_gem_object_map(obj, type);
                if (!ptr) {
                        err = -ENOMEM;
                        goto err_unpin;
                }

                obj->mm.mapping = page_pack_bits(ptr, type);
        }

out_unlock:
        mutex_unlock(&obj->mm.lock);
        return ptr;

err_unpin:
        atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
        ptr = ERR_PTR(err);
        goto out_unlock;
}

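/*
 * Illustrative usage (a sketch, not a caller in this file): map the object,
 * fill it from the CPU, flush the written range and release the mapping.
 * The buffer "data" and its "size" are hypothetical.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, size);
 *	__i915_gem_object_flush_map(obj, 0, size);
 *	i915_gem_object_unpin_map(obj);
 */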
void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size)
{
        enum i915_map_type has_type;
        void *ptr;

        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
        GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
                                     offset, size, obj->base.size));

        obj->mm.dirty = true;

        if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
                return;

        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (has_type == I915_MAP_WC)
                return;

        drm_clflush_virt_range(ptr + offset, size);
        if (size == obj->base.size) {
                obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
                obj->cache_dirty = false;
        }
}

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                       unsigned int n,
                       unsigned int *offset)
{
        struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
        struct scatterlist *sg;
        unsigned int idx, count;

        might_sleep();
        GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

        /* As we iterate forward through the sg, we record each entry in a
         * radixtree for quick repeated (backwards) lookups. If we have seen
         * this index previously, we will have an entry for it.
         *
         * Initial lookup is O(N), but this is amortized to O(1) for
         * sequential page access (where each new request is consecutive
         * to the previous one). Repeated lookups are O(lg(obj->base.size)),
         * i.e. O(1) with a large constant!
         */
        if (n < READ_ONCE(iter->sg_idx))
                goto lookup;

        mutex_lock(&iter->lock);

        /* We prefer to reuse the last sg so that repeated lookups of this
         * (or the subsequent) sg are fast - comparing against the last
         * sg is faster than going through the radixtree.
         */

        sg = iter->sg_pos;
        idx = iter->sg_idx;
        count = __sg_page_count(sg);

        while (idx + count <= n) {
                void *entry;
                unsigned long i;
                int ret;

                /* If we cannot allocate and insert this entry, or the
                 * individual pages from this range, cancel updating the
                 * sg_idx so that on this lookup we are forced to linearly
                 * scan onwards, but on future lookups we will try the
                 * insertion again (in which case we need to be careful of
                 * the error return reporting that we have already inserted
                 * this index).
                 */
                ret = radix_tree_insert(&iter->radix, idx, sg);
                if (ret && ret != -EEXIST)
                        goto scan;

                entry = xa_mk_value(idx);
                for (i = 1; i < count; i++) {
                        ret = radix_tree_insert(&iter->radix, idx + i, entry);
                        if (ret && ret != -EEXIST)
                                goto scan;
                }

                idx += count;
                sg = ____sg_next(sg);
                count = __sg_page_count(sg);
        }

scan:
        iter->sg_pos = sg;
        iter->sg_idx = idx;

        mutex_unlock(&iter->lock);

        if (unlikely(n < idx)) /* insertion completed by another thread */
                goto lookup;

        /* In case we failed to insert the entry into the radixtree, we need
         * to look beyond the current sg.
         */
        while (idx + count <= n) {
                idx += count;
                sg = ____sg_next(sg);
                count = __sg_page_count(sg);
        }

        *offset = n - idx;
        return sg;

lookup:
        rcu_read_lock();

        sg = radix_tree_lookup(&iter->radix, n);
        GEM_BUG_ON(!sg);

        /* If this index is in the middle of a multi-page sg entry,
         * the radix tree will contain a value entry that points
         * to the start of that range. We will return the pointer to
         * the base page and the offset of this page within the
         * sg entry's range.
         */
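        /*
         * Worked example (illustrative numbers only): for an sg entry that
         * covers pages 16..23, the radix tree holds radix[16] = sg and
         * radix[17..23] = value(16). A lookup of n = 20 therefore finds the
         * value entry, re-looks up index 16 to fetch the sg, and reports
         * *offset = 4 into that entry.
         */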
        *offset = 0;
        if (unlikely(xa_is_value(sg))) {
                unsigned long base = xa_to_value(sg);

                sg = radix_tree_lookup(&iter->radix, base);
                GEM_BUG_ON(!sg);

                *offset = n - base;
        }

        rcu_read_unlock();

        return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
        struct scatterlist *sg;
        unsigned int offset;

        GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

        sg = i915_gem_object_get_sg(obj, n, &offset);
        return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
                               unsigned int n)
{
        struct page *page;

        page = i915_gem_object_get_page(obj, n);
        /*
         * Once the whole object is marked dirty, every page is flagged for
         * writeback when the pages are released, so only dirty the
         * individual page while that is not yet pending.
         */
        if (!obj->mm.dirty)
                set_page_dirty(page);

        return page;
}

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                    unsigned long n,
                                    unsigned int *len)
{
        struct scatterlist *sg;
        unsigned int offset;

        sg = i915_gem_object_get_sg(obj, n, &offset);

        if (len)
                *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

        return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
                                unsigned long n)
{
        return i915_gem_object_get_dma_address_len(obj, n, NULL);
}
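
/*
 * Illustrative usage (a sketch, not a caller in this file): walk the DMA
 * addresses of the first few pages of a pinned object. The page count
 * "n_pages" is hypothetical.
 *
 *	unsigned long i;
 *
 *	for (i = 0; i < n_pages; i++) {
 *		unsigned int len;
 *		dma_addr_t addr;
 *
 *		addr = i915_gem_object_get_dma_address_len(obj, i, &len);
 *		pr_debug("page %lu -> %pad (+%u bytes contiguous)\n",
 *			 i, &addr, len);
 *	}
 */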