drivers/gpu/drm/i915/gem/i915_gem_pages.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

#include "gt/intel_gt.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages,
                                 unsigned int sg_page_sizes)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        unsigned long supported = INTEL_INFO(i915)->page_sizes;
        bool shrinkable;
        int i;

        assert_object_held_shared(obj);

        if (i915_gem_object_is_volatile(obj))
                obj->mm.madv = I915_MADV_DONTNEED;

        /* Make the pages coherent with the GPU (flushing any swapin). */
        if (obj->cache_dirty) {
                WARN_ON_ONCE(IS_DGFX(i915));
                obj->write_domain = 0;
                if (i915_gem_object_has_struct_page(obj))
                        drm_clflush_sg(pages);
                obj->cache_dirty = false;
        }

        obj->mm.get_page.sg_pos = pages->sgl;
        obj->mm.get_page.sg_idx = 0;
        obj->mm.get_dma_page.sg_pos = pages->sgl;
        obj->mm.get_dma_page.sg_idx = 0;

        obj->mm.pages = pages;

        GEM_BUG_ON(!sg_page_sizes);
        obj->mm.page_sizes.phys = sg_page_sizes;

        /*
         * Calculate the supported page-sizes which fit into the given
         * sg_page_sizes. This will give us the page-sizes which we may be able
         * to use opportunistically when later inserting into the GTT. For
         * example if phys=2G, then in theory we should be able to use 1G, 2M,
         * 64K or 4K pages, although in practice this will depend on a number of
         * other factors.
         */
        obj->mm.page_sizes.sg = 0;
        for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
                if (obj->mm.page_sizes.phys & ~0u << i)
                        obj->mm.page_sizes.sg |= BIT(i);
        }
        GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

        shrinkable = i915_gem_object_is_shrinkable(obj);

        if (i915_gem_object_is_tiled(obj) &&
            i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
                i915_gem_object_set_tiling_quirk(obj);
                GEM_BUG_ON(!list_empty(&obj->mm.link));
                atomic_inc(&obj->mm.shrink_pin);
                shrinkable = false;
        }

        if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) {
                struct list_head *list;
                unsigned long flags;

                assert_object_held(obj);
                spin_lock_irqsave(&i915->mm.obj_lock, flags);

                i915->mm.shrink_count++;
                i915->mm.shrink_memory += obj->base.size;

                if (obj->mm.madv != I915_MADV_WILLNEED)
                        list = &i915->mm.purge_list;
                else
                        list = &i915->mm.shrink_list;
                list_add_tail(&obj->mm.link, list);

                atomic_set(&obj->mm.shrink_pin, 0);
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }
}
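
/*
 * Editor's worked example (not part of the upstream file): the loop above
 * keeps every supported page size at or below the largest physical chunk.
 * The hypothetical helper below restates that logic in a self-contained
 * form; for phys = SZ_2M | SZ_64K on a platform supporting 4K/64K/2M it
 * yields SZ_4K | SZ_64K | SZ_2M.
 */
#if 0
static unsigned int example_page_sizes_sg(unsigned long supported,
                                          unsigned int phys)
{
        unsigned int sg = 0;
        int i;

        /* Keep bit i whenever any physical chunk of size >= BIT(i) exists. */
        for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1)
                if (phys & ~0u << i)
                        sg |= BIT(i);

        return sg;
}
#endif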

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        int err;

        assert_object_held_shared(obj);

        if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
                drm_dbg(&i915->drm,
                        "Attempting to obtain a purgeable object\n");
                return -EFAULT;
        }

        err = obj->ops->get_pages(obj);
        GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

        return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
        int err;

        assert_object_held(obj);

        assert_object_held_shared(obj);

        if (unlikely(!i915_gem_object_has_pages(obj))) {
                GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

                err = ____i915_gem_object_get_pages(obj);
                if (err)
                        return err;

                smp_mb__before_atomic();
        }
        atomic_inc(&obj->mm.pages_pin_count);

        return 0;
}
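
/*
 * Hedged usage sketch (editor's illustration; the caller is hypothetical):
 * pinning must be done under the object lock, pins nest, and each
 * successful pin needs a matching unpin once the pages are no longer used.
 */
#if 0
static int example_use_pinned_pages(struct drm_i915_gem_object *obj)
{
        int err;

        err = i915_gem_object_lock_interruptible(obj, NULL);
        if (err)
                return err;

        err = i915_gem_object_pin_pages(obj);
        i915_gem_object_unlock(obj);
        if (err)
                return err;

        /* ... obj->mm.pages is now resident and safe to walk ... */

        i915_gem_object_unpin_pages(obj);
        return 0;
}
#endif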

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        err = i915_gem_object_lock(obj, &ww);
        if (!err)
                err = i915_gem_object_pin_pages(obj);

        if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);
        return err;
}
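
/*
 * Editor's sketch of the same ww backoff pattern for a hypothetical caller
 * that must lock and pin two objects: on -EDEADLK the backoff drops all
 * held locks and the whole sequence is retried.
 */
#if 0
static int example_pin_two(struct drm_i915_gem_object *a,
                           struct drm_i915_gem_object *b)
{
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        err = i915_gem_object_lock(a, &ww);
        if (!err)
                err = i915_gem_object_lock(b, &ww);
        if (!err)
                err = i915_gem_object_pin_pages(a);
        if (!err) {
                err = i915_gem_object_pin_pages(b);
                if (err)
                        i915_gem_object_unpin_pages(a);
        }
        if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);
        return err;
}
#endif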

/* Immediately discard the backing storage */
int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
        if (obj->ops->truncate)
                return obj->ops->truncate(obj);

        return 0;
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
        assert_object_held_shared(obj);
        GEM_BUG_ON(i915_gem_object_has_pages(obj));

        if (obj->ops->writeback)
                obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
        struct radix_tree_iter iter;
        void __rcu **slot;

        rcu_read_lock();
        radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
                radix_tree_delete(&obj->mm.get_page.radix, iter.index);
        radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
                radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
        rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
        if (is_vmalloc_addr(ptr))
                vunmap(ptr);
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;

        assert_object_held_shared(obj);

        pages = fetch_and_zero(&obj->mm.pages);
        if (IS_ERR_OR_NULL(pages))
                return pages;

        if (i915_gem_object_is_volatile(obj))
                obj->mm.madv = I915_MADV_WILLNEED;

        if (!i915_gem_object_has_self_managed_shrink_list(obj))
                i915_gem_object_make_unshrinkable(obj);

        if (obj->mm.mapping) {
                unmap_object(obj, page_mask_bits(obj->mm.mapping));
                obj->mm.mapping = NULL;
        }

        __i915_gem_object_reset_page_iter(obj);
        obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

        if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
                struct drm_i915_private *i915 = to_i915(obj->base.dev);
                intel_wakeref_t wakeref;

                with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
                        intel_gt_invalidate_tlbs(to_gt(i915));
        }

        return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;

        if (i915_gem_object_has_pinned_pages(obj))
                return -EBUSY;

        /* May be called by shrinker from within get_pages() (on another bo) */
        assert_object_held_shared(obj);

        i915_gem_object_release_mmap_offset(obj);

        /*
         * ->put_pages might need to allocate memory for the bit17 swizzle
         * array, hence protect them from being reaped by removing them from gtt
         * lists early.
         */
        pages = __i915_gem_object_unset_pages(obj);

        /*
         * XXX Temporary hijinx to avoid updating all backends to handle
         * NULL pages. In the future, when we have more asynchronous
         * get_pages backends we should be better able to handle the
         * cancellation of the async task in a more uniform manner.
         */
        if (!IS_ERR_OR_NULL(pages))
                obj->ops->put_pages(obj, pages);

        return 0;
}
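
/*
 * Hedged sketch (editor's illustration, hypothetical helper): dropping the
 * backing store only succeeds once every pin has been released; while
 * pages_pin_count is non-zero the call above fails with -EBUSY.
 */
#if 0
static int example_try_put_pages(struct drm_i915_gem_object *obj)
{
        int err;

        err = i915_gem_object_lock_interruptible(obj, NULL);
        if (err)
                return err;

        err = __i915_gem_object_put_pages(obj); /* -EBUSY if still pinned */
        i915_gem_object_unlock(obj);

        return err;
}
#endif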

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
                                      enum i915_map_type type)
{
        unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
        struct page *stack[32], **pages = stack, *page;
        struct sgt_iter iter;
        pgprot_t pgprot;
        void *vaddr;

        switch (type) {
        default:
                MISSING_CASE(type);
                fallthrough;    /* to use PAGE_KERNEL anyway */
        case I915_MAP_WB:
                /*
                 * On 32b, highmem uses a finite set of indirect PTEs (i.e.
                 * vmap) to provide virtual mappings of the high pages.
                 * As these are finite, map_new_virtual() must wait for some
                 * other kmap() to finish when it runs out. If we map a large
                 * number of objects, there is no method for it to tell us
                 * to release the mappings, and we deadlock.
                 *
                 * However, if we make an explicit vmap of the page, that
                 * uses a larger vmalloc arena, and also has the ability
                 * to tell us to release unwanted mappings. Most importantly,
                 * it will fail and propagate an error instead of waiting
                 * forever.
                 *
                 * So if the page is beyond the 32b boundary, make an explicit
                 * vmap.
                 */
                if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
                        return page_address(sg_page(obj->mm.pages->sgl));
                pgprot = PAGE_KERNEL;
                break;
        case I915_MAP_WC:
                pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
                break;
        }

        if (n_pages > ARRAY_SIZE(stack)) {
                /* Too big for stack -- allocate temporary array instead */
                pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
                if (!pages)
                        return ERR_PTR(-ENOMEM);
        }

        i = 0;
        for_each_sgt_page(page, iter, obj->mm.pages)
                pages[i++] = page;
        vaddr = vmap(pages, n_pages, 0, pgprot);
        if (pages != stack)
                kvfree(pages);

        return vaddr ?: ERR_PTR(-ENOMEM);
}
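
/*
 * Editor's illustration of the single-page fast path above: a one-page
 * lowmem object needs no vmap at all, since the kernel linear map already
 * provides an address for it. The helper is hypothetical.
 */
#if 0
static void *example_single_lowmem_vaddr(struct drm_i915_gem_object *obj)
{
        struct page *page = sg_page(obj->mm.pages->sgl);

        /* Only lowmem pages have a permanent kernel virtual address. */
        return PageHighMem(page) ? NULL : page_address(page);
}
#endif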

static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
                                     enum i915_map_type type)
{
        resource_size_t iomap = obj->mm.region->iomap.base -
                obj->mm.region->region.start;
        unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
        unsigned long stack[32], *pfns = stack, i;
        struct sgt_iter iter;
        dma_addr_t addr;
        void *vaddr;

        GEM_BUG_ON(type != I915_MAP_WC);

        if (n_pfn > ARRAY_SIZE(stack)) {
                /* Too big for stack -- allocate temporary array instead */
                pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
                if (!pfns)
                        return ERR_PTR(-ENOMEM);
        }

        i = 0;
        for_each_sgt_daddr(addr, iter, obj->mm.pages)
                pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
        vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
        if (pfns != stack)
                kvfree(pfns);

        return vaddr ?: ERR_PTR(-ENOMEM);
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                              enum i915_map_type type)
{
        enum i915_map_type has_type;
        bool pinned;
        void *ptr;
        int err;

        if (!i915_gem_object_has_struct_page(obj) &&
            !i915_gem_object_has_iomem(obj))
                return ERR_PTR(-ENXIO);

        assert_object_held(obj);

        pinned = !(type & I915_MAP_OVERRIDE);
        type &= ~I915_MAP_OVERRIDE;

        if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
                if (unlikely(!i915_gem_object_has_pages(obj))) {
                        GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

                        err = ____i915_gem_object_get_pages(obj);
                        if (err)
                                return ERR_PTR(err);

                        smp_mb__before_atomic();
                }
                atomic_inc(&obj->mm.pages_pin_count);
                pinned = false;
        }
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));

        /*
         * For discrete, our CPU mappings need to be consistent in order to
         * function correctly on !x86. When mapping things through TTM, we use
         * the same rules to determine the caching type.
         *
         * The caching rules, starting from DG1:
         *
         *      - If the object can be placed in device local-memory, then the
         *        pages should be allocated and mapped as write-combined only.
         *
         *      - Everything else is always allocated and mapped as write-back,
         *        with the guarantee that everything is also coherent with the
         *        GPU.
         *
         * Internal users of lmem are already expected to get this right, so no
         * fudging needed there.
         */
        if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
                if (type != I915_MAP_WC && !obj->mm.n_placements) {
                        ptr = ERR_PTR(-ENODEV);
                        goto err_unpin;
                }

                type = I915_MAP_WC;
        } else if (IS_DGFX(to_i915(obj->base.dev))) {
                type = I915_MAP_WB;
        }

        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (ptr && has_type != type) {
                if (pinned) {
                        ptr = ERR_PTR(-EBUSY);
                        goto err_unpin;
                }

                unmap_object(obj, ptr);

                ptr = obj->mm.mapping = NULL;
        }

        if (!ptr) {
                err = i915_gem_object_wait_moving_fence(obj, true);
                if (err) {
                        ptr = ERR_PTR(err);
                        goto err_unpin;
                }

                if (GEM_WARN_ON(type == I915_MAP_WC && !pat_enabled()))
                        ptr = ERR_PTR(-ENODEV);
                else if (i915_gem_object_has_struct_page(obj))
                        ptr = i915_gem_object_map_page(obj, type);
                else
                        ptr = i915_gem_object_map_pfn(obj, type);
                if (IS_ERR(ptr))
                        goto err_unpin;

                obj->mm.mapping = page_pack_bits(ptr, type);
        }

        return ptr;

err_unpin:
        atomic_dec(&obj->mm.pages_pin_count);
        return ptr;
}

void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
                                       enum i915_map_type type)
{
        void *ret;

        i915_gem_object_lock(obj, NULL);
        ret = i915_gem_object_pin_map(obj, type);
        i915_gem_object_unlock(obj);

        return ret;
}

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size)
{
        enum i915_map_type has_type;
        void *ptr;

        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
        GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
                                     offset, size, obj->base.size));

        wmb(); /* let all previous writes be visible to coherent partners */
        obj->mm.dirty = true;

        if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
                return;

        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (has_type == I915_MAP_WC)
                return;

        drm_clflush_virt_range(ptr + offset, size);
        if (size == obj->base.size) {
                obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
                obj->cache_dirty = false;
        }
}
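
/*
 * Hedged usage sketch (editor's illustration; the caller is hypothetical):
 * write one dword through a kernel WB mapping, then flush just that range
 * so coherent partners observe the write.
 */
#if 0
static int example_cpu_poke(struct drm_i915_gem_object *obj, u32 value)
{
        u32 *vaddr;

        /* Pins the pages and returns (or reuses) a kernel mapping. */
        vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        vaddr[0] = value;
        __i915_gem_object_flush_map(obj, 0, sizeof(*vaddr));

        i915_gem_object_unpin_map(obj);
        return 0;
}
#endif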

void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!obj->mm.mapping);

        /*
         * We allow removing the mapping from underneath pinned pages!
         *
         * Furthermore, since this is an unsafe operation reserved only
         * for construction time manipulation, we ignore locking prudence.
         */
        unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

        i915_gem_object_unpin_map(obj);
}

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                         struct i915_gem_object_page_iter *iter,
                         unsigned int n,
                         unsigned int *offset,
                         bool dma)
{
        struct scatterlist *sg;
        unsigned int idx, count;

        might_sleep();
        GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
        if (!i915_gem_object_has_pinned_pages(obj))
                assert_object_held(obj);

        /* As we iterate forward through the sg, we record each entry in a
         * radixtree for quick repeated (backwards) lookups. If we have seen
         * this index previously, we will have an entry for it.
         *
         * Initial lookup is O(N), but this is amortized to O(1) for
         * sequential page access (where each new request is consecutive
         * to the previous one). Repeated lookups are O(lg(obj->base.size)),
         * i.e. O(1) with a large constant!
         */
        if (n < READ_ONCE(iter->sg_idx))
                goto lookup;

        mutex_lock(&iter->lock);

        /* We prefer to reuse the last sg so that repeated lookups of this
         * (or the subsequent) sg are fast - comparing against the last
         * sg is faster than going through the radixtree.
         */

        sg = iter->sg_pos;
        idx = iter->sg_idx;
        count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

        while (idx + count <= n) {
                void *entry;
                unsigned long i;
                int ret;

                /* If we cannot allocate and insert this entry, or the
                 * individual pages from this range, cancel updating the
                 * sg_idx so that on this lookup we are forced to linearly
                 * scan onwards, but on future lookups we will try the
                 * insertion again (in which case we need to be careful of
                 * the error return reporting that we have already inserted
                 * this index).
                 */
                ret = radix_tree_insert(&iter->radix, idx, sg);
                if (ret && ret != -EEXIST)
                        goto scan;

                entry = xa_mk_value(idx);
                for (i = 1; i < count; i++) {
                        ret = radix_tree_insert(&iter->radix, idx + i, entry);
                        if (ret && ret != -EEXIST)
                                goto scan;
                }

                idx += count;
                sg = ____sg_next(sg);
                count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
        }

scan:
        iter->sg_pos = sg;
        iter->sg_idx = idx;

        mutex_unlock(&iter->lock);

        if (unlikely(n < idx)) /* insertion completed by another thread */
                goto lookup;

        /* In case we failed to insert the entry into the radixtree, we need
         * to look beyond the current sg.
         */
        while (idx + count <= n) {
                idx += count;
                sg = ____sg_next(sg);
                count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
        }

        *offset = n - idx;
        return sg;

lookup:
        rcu_read_lock();

        sg = radix_tree_lookup(&iter->radix, n);
        GEM_BUG_ON(!sg);

        /* If this index is in the middle of a multi-page sg entry,
         * the radix tree will contain a value entry that points
         * to the start of that range. We will return the pointer to
         * the base page and the offset of this page within the
         * sg entry's range.
         */
        *offset = 0;
        if (unlikely(xa_is_value(sg))) {
                unsigned long base = xa_to_value(sg);

                sg = radix_tree_lookup(&iter->radix, base);
                GEM_BUG_ON(!sg);

                *offset = n - base;
        }

        rcu_read_unlock();

        return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
        struct scatterlist *sg;
        unsigned int offset;

        GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

        sg = i915_gem_object_get_sg(obj, n, &offset);
        return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
                               unsigned int n)
{
        struct page *page;

        page = i915_gem_object_get_page(obj, n);
        if (!obj->mm.dirty)
                set_page_dirty(page);

        return page;
}

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                    unsigned long n,
                                    unsigned int *len)
{
        struct scatterlist *sg;
        unsigned int offset;

        sg = i915_gem_object_get_sg_dma(obj, n, &offset);

        if (len)
                *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

        return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
                                unsigned long n)
{
        return i915_gem_object_get_dma_address_len(obj, n, NULL);
}