linux-2.6-microblaze.git: drivers/gpu/drm/i915/gem/i915_gem_pages.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	bool shrinkable;
	int i;

	assert_object_held_shared(obj);

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Make the pages coherent with the GPU (flushing any swapin). */
	if (obj->cache_dirty) {
		obj->write_domain = 0;
		if (i915_gem_object_has_struct_page(obj))
			drm_clflush_sg(pages);
		obj->cache_dirty = false;
	}

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;
	obj->mm.get_dma_page.sg_pos = pages->sgl;
	obj->mm.get_dma_page.sg_idx = 0;

	obj->mm.pages = pages;

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
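	/*
	 * Illustrative worked example (not from the driver itself): suppose
	 * the platform supports 4K, 64K and 2M pages and the backing store
	 * was allocated as one 2M chunk plus one 4K chunk, i.e.
	 * phys = SZ_2M | SZ_4K. The loop below then yields
	 *
	 *	sg = SZ_2M | SZ_64K | SZ_4K
	 *
	 * since a bit is kept whenever phys contains any chunk of at least
	 * that size - a 2M chunk can also be mapped with 64K or 4K GTT
	 * entries, even though no chunk is exactly 64K.
	 */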
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	shrinkable = i915_gem_object_is_shrinkable(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(i915_gem_object_has_tiling_quirk(obj));
		i915_gem_object_set_tiling_quirk(obj);
		GEM_BUG_ON(!list_empty(&obj->mm.link));
		atomic_inc(&obj->mm.shrink_pin);
		shrinkable = false;
	}

	if (shrinkable) {
		struct list_head *list;
		unsigned long flags;

		assert_object_held(obj);
		spin_lock_irqsave(&i915->mm.obj_lock, flags);

		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;

		if (obj->mm.madv != I915_MADV_WILLNEED)
			list = &i915->mm.purge_list;
		else
			list = &i915->mm.shrink_list;
		list_add_tail(&obj->mm.link, list);

		atomic_set(&obj->mm.shrink_pin, 0);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
	}
}

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	assert_object_held_shared(obj);

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(&i915->drm,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	assert_object_held(obj);

	assert_object_held_shared(obj);

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			return err;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

	return 0;
}

int i915_gem_object_pin_pages_unlocked(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_pin_pages(obj);

	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}

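/*
 * Illustrative usage sketch (not part of the driver): callers bracket any
 * direct use of the backing store with a pin/unpin pair so that the shrinker
 * cannot reap the pages in between. Either take the object lock in a ww
 * context and call i915_gem_object_pin_pages(), or use the helper above:
 *
 *	err = i915_gem_object_pin_pages_unlocked(obj);
 *	if (err)
 *		return err;
 *	... access obj->mm.pages, i915_gem_object_get_page(), etc. ...
 *	i915_gem_object_unpin_pages(obj);
 */
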
/* Immediately discard the backing storage */
void i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
	if (obj->ops->truncate)
		obj->ops->truncate(obj);
}

/* Try to discard unwanted pages */
void i915_gem_object_writeback(struct drm_i915_gem_object *obj)
{
	assert_object_held_shared(obj);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	if (obj->ops->writeback)
		obj->ops->writeback(obj);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
	rcu_read_unlock();
}

static void unmap_object(struct drm_i915_gem_object *obj, void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vunmap(ptr);
}

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	assert_object_held_shared(obj);

	pages = fetch_and_zero(&obj->mm.pages);
	if (IS_ERR_OR_NULL(pages))
		return pages;

	if (i915_gem_object_is_volatile(obj))
		obj->mm.madv = I915_MADV_WILLNEED;

	i915_gem_object_make_unshrinkable(obj);

	if (obj->mm.mapping) {
		unmap_object(obj, page_mask_bits(obj->mm.mapping));
		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	/* May be called by shrinker from within get_pages() (on another bo) */
	assert_object_held_shared(obj);

	i915_gem_object_release_mmap_offset(obj);

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);

	/*
	 * XXX Temporary hijinx to avoid updating all backends to handle
	 * NULL pages. In the future, when we have more asynchronous
	 * get_pages backends we should be better able to handle the
	 * cancellation of the async task in a more uniform manner.
	 */
	if (!IS_ERR_OR_NULL(pages))
		obj->ops->put_pages(obj, pages);

	return 0;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
				      enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
	struct page *stack[32], **pages = stack, *page;
	struct sgt_iter iter;
	pgprot_t pgprot;
	void *vaddr;

	switch (type) {
	default:
		MISSING_CASE(type);
		fallthrough;	/* to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		/*
		 * On 32b, highmem uses a finite set of indirect PTE (i.e.
		 * vmap) to provide virtual mappings of the high pages.
		 * As these are finite, map_new_virtual() must wait for some
		 * other kmap() to finish when it runs out. If we map a large
		 * number of objects, there is no method for it to tell us
		 * to release the mappings, and we deadlock.
		 *
		 * However, if we make an explicit vmap of the page, that
		 * uses a larger vmalloc arena, and also has the ability
		 * to tell us to release unwanted mappings. Most importantly,
		 * it will fail and propagate an error instead of waiting
		 * forever.
		 *
		 * So if the page is beyond the 32b boundary, make an explicit
		 * vmap.
		 */
		if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
			return page_address(sg_page(obj->mm.pages->sgl));
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}

	if (n_pages > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_page(page, iter, obj->mm.pages)
		pages[i++] = page;
	vaddr = vmap(pages, n_pages, 0, pgprot);
	if (pages != stack)
		kvfree(pages);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
				     enum i915_map_type type)
{
	resource_size_t iomap = obj->mm.region->iomap.base -
		obj->mm.region->region.start;
	unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
	unsigned long stack[32], *pfns = stack, i;
	struct sgt_iter iter;
	dma_addr_t addr;
	void *vaddr;

	if (type != I915_MAP_WC)
		return ERR_PTR(-ENODEV);

	if (n_pfn > ARRAY_SIZE(stack)) {
		/* Too big for stack -- allocate temporary array instead */
		pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
		if (!pfns)
			return ERR_PTR(-ENOMEM);
	}

	i = 0;
	for_each_sgt_daddr(addr, iter, obj->mm.pages)
		pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
	vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
	if (pfns != stack)
		kvfree(pfns);

	return vaddr ?: ERR_PTR(-ENOMEM);
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int err;

	if (!i915_gem_object_has_struct_page(obj) &&
	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
		return ERR_PTR(-ENXIO);

	assert_object_held(obj);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			err = ____i915_gem_object_get_pages(obj);
			if (err)
				return ERR_PTR(err);

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ptr = ERR_PTR(-EBUSY);
			goto err_unpin;
		}

		unmap_object(obj, ptr);

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		if (GEM_WARN_ON(type == I915_MAP_WC &&
				!static_cpu_has(X86_FEATURE_PAT)))
			ptr = ERR_PTR(-ENODEV);
		else if (i915_gem_object_has_struct_page(obj))
			ptr = i915_gem_object_map_page(obj, type);
		else
			ptr = i915_gem_object_map_pfn(obj, type);
		if (IS_ERR(ptr))
			goto err_unpin;

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
	return ptr;
}

void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
				       enum i915_map_type type)
{
	void *ret;

	i915_gem_object_lock(obj, NULL);
	ret = i915_gem_object_pin_map(obj, type);
	i915_gem_object_unlock(obj);

	return ret;
}

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size)
{
	enum i915_map_type has_type;
	void *ptr;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
				     offset, size, obj->base.size));

	wmb(); /* let all previous writes be visible to coherent partners */
	obj->mm.dirty = true;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
		return;

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (has_type == I915_MAP_WC)
		return;

	drm_clflush_virt_range(ptr + offset, size);
	if (size == obj->base.size) {
		obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
		obj->cache_dirty = false;
	}
}

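/*
 * Illustrative usage sketch (not part of the driver): the common CPU-write
 * idiom pairs a pinned kernel mapping with a flush so that a non-coherent GPU
 * observes the data; "src" and "len" are hypothetical.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, src, len);
 *	__i915_gem_object_flush_map(obj, 0, len);
 *	i915_gem_object_unpin_map(obj);
 */
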
void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!obj->mm.mapping);

	/*
	 * We allow removing the mapping from underneath pinned pages!
	 *
	 * Furthermore, since this is an unsafe operation reserved only
	 * for construction time manipulation, we ignore locking prudence.
	 */
	unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));

	i915_gem_object_unpin_map(obj);
}

struct scatterlist *
__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
			 struct i915_gem_object_page_iter *iter,
			 unsigned int n,
			 unsigned int *offset,
			 bool allow_alloc)
{
	const bool dma = iter == &obj->mm.get_dma_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	if (!allow_alloc)
		goto manual_lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookups of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);

	while (idx + count <= n) {
		void *entry;
		unsigned long i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		entry = xa_mk_value(idx);
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i, entry);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	goto manual_walk;

manual_lookup:
	idx = 0;
	sg = obj->mm.pages->sgl;
	count = __sg_page_count(sg);

manual_walk:
	/*
	 * In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of a multi-page sg entry,
	 * the radix tree will contain a value entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(xa_is_value(sg))) {
		unsigned long base = xa_to_value(sg);

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

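/*
 * Illustrative layout sketch (not taken from a real object): if the first sg
 * entry covers pages 0-3 and the second covers pages 4-5, the cache built by
 * __i915_gem_object_get_sg() ends up as
 *
 *	radix[0]    = sg0		(pointer to the scatterlist entry)
 *	radix[1..3] = xa_mk_value(0)	(value entries pointing back to index 0)
 *	radix[4]    = sg1
 *	radix[5]    = xa_mk_value(4)
 *
 * so a lookup of page 2 hits the value entry, re-looks up index 0 and returns
 * sg0 with *offset = 2.
 */
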
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset, true);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg_dma(obj, n, &offset, true);

	if (len)
		*len = sg_dma_len(sg) - (offset << PAGE_SHIFT);

	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	return i915_gem_object_get_dma_address_len(obj, n, NULL);
}
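
/*
 * Illustrative usage sketch (not part of the driver): with the pages pinned,
 * per-page lookups are straightforward; "n" is a hypothetical page index into
 * the object. Note that i915_gem_object_get_page() asserts the object is
 * struct-page backed (see the GEM_BUG_ON above).
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	page = i915_gem_object_get_page(obj, n);
 *	dma = i915_gem_object_get_dma_address(obj, n);
 */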