drivers/gpu/drm/i915/gem/i915_gem_shmem.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2014-2016 Intel Corporation
5  */
6
7 #include <linux/pagevec.h>
8 #include <linux/swap.h>
9
10 #include "gem/i915_gem_region.h"
11 #include "i915_drv.h"
12 #include "i915_gemfs.h"
13 #include "i915_gem_object.h"
14 #include "i915_scatterlist.h"
15 #include "i915_trace.h"
16
17 /*
18  * Move pages to the appropriate LRU and release the pagevec, decrementing the
19  * ref count of those pages.
20  */
21 static void check_release_pagevec(struct pagevec *pvec)
22 {
23         check_move_unevictable_pages(pvec);
24         __pagevec_release(pvec);
25         cond_resched();
26 }
27
28 static int shmem_get_pages(struct drm_i915_gem_object *obj)
29 {
30         struct drm_i915_private *i915 = to_i915(obj->base.dev);
31         struct intel_memory_region *mem = obj->mm.region;
32         const unsigned long page_count = obj->base.size / PAGE_SIZE;
33         unsigned long i;
34         struct address_space *mapping;
35         struct sg_table *st;
36         struct scatterlist *sg;
37         struct sgt_iter sgt_iter;
38         struct page *page;
39         unsigned long last_pfn = 0;     /* suppress gcc warning */
40         unsigned int max_segment = i915_sg_segment_size();
41         unsigned int sg_page_sizes;
42         gfp_t noreclaim;
43         int ret;
44
45         /*
46          * Assert that the object is not currently in any GPU domain. As it
47          * wasn't in the GTT, there shouldn't be any way it could have been in
48          * a GPU cache
49          */
50         GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
51         GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
52
53         /*
54          * If there's no chance of allocating enough pages for the whole
55          * object, bail early.
56          */
57         if (obj->base.size > resource_size(&mem->region))
58                 return -ENOMEM;
59
60         st = kmalloc(sizeof(*st), GFP_KERNEL);
61         if (!st)
62                 return -ENOMEM;
63
64 rebuild_st:
65         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
66                 kfree(st);
67                 return -ENOMEM;
68         }
69
70         /*
71          * Get the list of pages out of our struct file.  They'll be pinned
72          * at this point until we release them.
73          *
74          * Fail silently without starting the shrinker
75          */
76         mapping = obj->base.filp->f_mapping;
77         mapping_set_unevictable(mapping);
78         noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
79         noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
80
81         sg = st->sgl;
82         st->nents = 0;
83         sg_page_sizes = 0;
84         for (i = 0; i < page_count; i++) {
85                 const unsigned int shrink[] = {
86                         I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
87                         0,
88                 }, *s = shrink;
89                 gfp_t gfp = noreclaim;
90
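                /*
                 * In short, the allocation strategy below is: first try a
                 * cheap no-reclaim allocation; if that fails, shrink our own
                 * bound and unbound objects and retry with the mapping's full
                 * gfp mask plus __GFP_RETRY_MAYFAIL; and if that still fails,
                 * return the error to userspace rather than invoke the OOM
                 * killer.
                 */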
91                 do {
92                         cond_resched();
93                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
94                         if (!IS_ERR(page))
95                                 break;
96
97                         if (!*s) {
98                                 ret = PTR_ERR(page);
99                                 goto err_sg;
100                         }
101
102                         i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);
103
104                         /*
105                          * We've tried hard to allocate the memory by reaping
106                          * our own buffers; now let the real VM do its job and
107                          * go down in flames if truly OOM.
108                          *
109                          * However, since graphics tend to be disposable,
110                          * defer the oom here by reporting the ENOMEM back
111                          * to userspace.
112                          */
113                         if (!*s) {
114                                 /* reclaim and warn, but no oom */
115                                 gfp = mapping_gfp_mask(mapping);
116
117                                 /*
118                                  * Our bo are always dirty and so we require
119                                  * kswapd to reclaim our pages (direct reclaim
120                                  * does not effectively begin pageout of our
121                                  * buffers on its own). However, direct reclaim
122                                  * only waits for kswapd when under allocation
123                                  * congestion. As a result, __GFP_RECLAIM is
124                                  * unreliable and fails to actually reclaim our
125                                  * dirty pages -- unless you try over and over
126                                  * again with !__GFP_NORETRY. However, we still
127                                  * want to fail this allocation rather than
128                                  * trigger the out-of-memory killer and for
129                                  * this we want __GFP_RETRY_MAYFAIL.
130                                  */
131                                 gfp |= __GFP_RETRY_MAYFAIL;
132                         }
133                 } while (1);
134
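                /*
                 * Coalesce physically contiguous pages into a single sg
                 * entry: start a new entry for the first page, when the
                 * current entry has reached max_segment, or when this page
                 * is not contiguous with the previous one; otherwise simply
                 * extend the current entry by PAGE_SIZE.
                 */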
135                 if (!i ||
136                     sg->length >= max_segment ||
137                     page_to_pfn(page) != last_pfn + 1) {
138                         if (i) {
139                                 sg_page_sizes |= sg->length;
140                                 sg = sg_next(sg);
141                         }
142                         st->nents++;
143                         sg_set_page(sg, page, PAGE_SIZE, 0);
144                 } else {
145                         sg->length += PAGE_SIZE;
146                 }
147                 last_pfn = page_to_pfn(page);
148
149                 /* Check that the i965g/gm workaround works. */
150                 GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
151         }
152         if (sg) { /* loop terminated early; short sg table */
153                 sg_page_sizes |= sg->length;
154                 sg_mark_end(sg);
155         }
156
157         /* Trim unused sg entries to avoid wasting memory. */
158         i915_sg_trim(st);
159
160         ret = i915_gem_gtt_prepare_pages(obj, st);
161         if (ret) {
162                 /*
163                  * DMA remapping failed? One possible cause is that
164                  * it could not reserve enough large entries; asking
165                  * for PAGE_SIZE chunks instead may be helpful.
166                  */
167                 if (max_segment > PAGE_SIZE) {
168                         for_each_sgt_page(page, sgt_iter, st)
169                                 put_page(page);
170                         sg_free_table(st);
171
172                         max_segment = PAGE_SIZE;
173                         goto rebuild_st;
174                 } else {
175                         dev_warn(i915->drm.dev,
176                                  "Failed to DMA remap %lu pages\n",
177                                  page_count);
178                         goto err_pages;
179                 }
180         }
181
182         if (i915_gem_object_needs_bit17_swizzle(obj))
183                 i915_gem_object_do_bit_17_swizzle(obj, st);
184
185         if (i915_gem_object_can_bypass_llc(obj))
186                 obj->cache_dirty = true;
187
188         __i915_gem_object_set_pages(obj, st, sg_page_sizes);
189
190         return 0;
191
192 err_sg:
193         sg_mark_end(sg);
194 err_pages:
195         mapping_clear_unevictable(mapping);
196         if (sg != st->sgl) {
197                 struct pagevec pvec;
198
199                 pagevec_init(&pvec);
200                 for_each_sgt_page(page, sgt_iter, st) {
201                         if (!pagevec_add(&pvec, page))
202                                 check_release_pagevec(&pvec);
203                 }
204                 if (pagevec_count(&pvec))
205                         check_release_pagevec(&pvec);
206         }
207         sg_free_table(st);
208         kfree(st);
209
210         /*
211          * shmemfs first checks if there is enough memory to allocate the page
212          * and reports ENOSPC should memory be insufficient, along with the usual
213          * ENOMEM for a genuine allocation failure.
214          *
215          * We use ENOSPC in our driver to mean that we have run out of aperture
216          * space and so want to translate the error from shmemfs back to our
217          * usual understanding of ENOMEM.
218          */
219         if (ret == -ENOSPC)
220                 ret = -ENOMEM;
221
222         return ret;
223 }
224
225 static void
226 shmem_truncate(struct drm_i915_gem_object *obj)
227 {
228         /*
229          * Our goal here is to return as much of the memory as possible
230          * back to the system as we are called from OOM. To do this we
231          * must instruct shmemfs to drop all of its backing pages,
232          * *now*.
233          */
234         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
235         obj->mm.madv = __I915_MADV_PURGED;
236         obj->mm.pages = ERR_PTR(-EFAULT);
237 }
238
239 static void
240 shmem_writeback(struct drm_i915_gem_object *obj)
241 {
242         struct address_space *mapping;
243         struct writeback_control wbc = {
244                 .sync_mode = WB_SYNC_NONE,
245                 .nr_to_write = SWAP_CLUSTER_MAX,
246                 .range_start = 0,
247                 .range_end = LLONG_MAX,
248                 .for_reclaim = 1,
249         };
250         unsigned long i;
251
252         /*
253          * Leave mmappings intact (GTT will have been revoked on unbinding,
254          * leaving only CPU mmappings around) and add those pages to the LRU
255          * instead of invoking writeback so they are aged and paged out
256          * as normal.
257          */
258         mapping = obj->base.filp->f_mapping;
259
260         /* Begin writeback on each dirty page */
261         for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
262                 struct page *page;
263
264                 page = find_lock_page(mapping, i);
265                 if (!page)
266                         continue;
267
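                /*
                 * Only write back pages that are dirty and not mapped into
                 * userspace. PG_reclaim is set as a hint so that, once
                 * writeback completes, the page is rotated to the tail of
                 * the LRU and can be freed promptly.
                 */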
268                 if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
269                         int ret;
270
271                         SetPageReclaim(page);
272                         ret = mapping->a_ops->writepage(page, &wbc);
273                         if (!PageWriteback(page))
274                                 ClearPageReclaim(page);
275                         if (!ret)
276                                 goto put;
277                 }
278                 unlock_page(page);
279 put:
280                 put_page(page);
281         }
282 }
283
284 void
285 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
286                                 struct sg_table *pages,
287                                 bool needs_clflush)
288 {
289         struct drm_i915_private *i915 = to_i915(obj->base.dev);
290
291         GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
292
293         if (obj->mm.madv == I915_MADV_DONTNEED)
294                 obj->mm.dirty = false;
295
296         if (needs_clflush &&
297             (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
298             !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
299                 drm_clflush_sg(pages);
300
301         __start_cpu_write(obj);
302         /*
303          * On non-LLC platforms, force the flush-on-acquire if this is ever
304          * swapped-in. Our async flush path is not trustworthy enough yet (and
305          * happens in the wrong order), and with some tricks it's conceivable
306          * for userspace to change the cache-level to I915_CACHE_NONE after the
307          * pages are swapped-in, and since execbuf binds the object before doing
308          * the async flush, we have a race window.
309          */
310         if (!HAS_LLC(i915))
311                 obj->cache_dirty = true;
312 }
313
314 void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
315 {
316         struct sgt_iter sgt_iter;
317         struct pagevec pvec;
318         struct page *page;
319
320         GEM_WARN_ON(IS_DGFX(to_i915(obj->base.dev)));
321         __i915_gem_object_release_shmem(obj, pages, true);
322
323         i915_gem_gtt_finish_pages(obj, pages);
324
325         if (i915_gem_object_needs_bit17_swizzle(obj))
326                 i915_gem_object_save_bit_17_swizzle(obj, pages);
327
328         mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
329
330         pagevec_init(&pvec);
331         for_each_sgt_page(page, sgt_iter, pages) {
332                 if (obj->mm.dirty)
333                         set_page_dirty(page);
334
335                 if (obj->mm.madv == I915_MADV_WILLNEED)
336                         mark_page_accessed(page);
337
338                 if (!pagevec_add(&pvec, page))
339                         check_release_pagevec(&pvec);
340         }
341         if (pagevec_count(&pvec))
342                 check_release_pagevec(&pvec);
343         obj->mm.dirty = false;
344
345         sg_free_table(pages);
346         kfree(pages);
347 }
348
349 static void
350 shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
351 {
352         if (likely(i915_gem_object_has_struct_page(obj)))
353                 i915_gem_object_put_pages_shmem(obj, pages);
354         else
355                 i915_gem_object_put_pages_phys(obj, pages);
356 }
357
358 static int
359 shmem_pwrite(struct drm_i915_gem_object *obj,
360              const struct drm_i915_gem_pwrite *arg)
361 {
362         struct address_space *mapping = obj->base.filp->f_mapping;
363         char __user *user_data = u64_to_user_ptr(arg->data_ptr);
364         u64 remain, offset;
365         unsigned int pg;
366
367         /* Caller already validated user args */
368         GEM_BUG_ON(!access_ok(user_data, arg->size));
369
370         if (!i915_gem_object_has_struct_page(obj))
371                 return i915_gem_object_pwrite_phys(obj, arg);
372
373         /*
374          * Before we instantiate/pin the backing store for our use, we
375          * can prepopulate the shmemfs filp efficiently using a write into
376          * the pagecache. We avoid the penalty of instantiating all the
377          * pages, important if the user is just writing to a few and never
378          * uses the object on the GPU, and using a direct write into shmemfs
379          * allows it to avoid the cost of retrieving a page (either swapin
380          * or clearing-before-use) before it is overwritten.
381          */
382         if (i915_gem_object_has_pages(obj))
383                 return -ENODEV;
384
385         if (obj->mm.madv != I915_MADV_WILLNEED)
386                 return -EFAULT;
387
388         /*
389          * Before the pages are instantiated the object is treated as being
390          * in the CPU domain. The pages will be clflushed as required before
391          * use, and we can freely write into the pages directly. If userspace
392          * races pwrite with any other operation, corruption will ensue -
393          * that is userspace's prerogative!
394          */
395
396         remain = arg->size;
397         offset = arg->offset;
398         pg = offset_in_page(offset);
399
400         do {
401                 unsigned int len, unwritten;
402                 struct page *page;
403                 void *data, *vaddr;
404                 int err;
405                 char c;
406
407                 len = PAGE_SIZE - pg;
408                 if (len > remain)
409                         len = remain;
410
411                 /* Prefault the user page to reduce potential recursion */
412                 err = __get_user(c, user_data);
413                 if (err)
414                         return err;
415
416                 err = __get_user(c, user_data + len - 1);
417                 if (err)
418                         return err;
419
420                 err = pagecache_write_begin(obj->base.filp, mapping,
421                                             offset, len, 0,
422                                             &page, &data);
423                 if (err < 0)
424                         return err;
425
426                 vaddr = kmap_atomic(page);
427                 unwritten = __copy_from_user_inatomic(vaddr + pg,
428                                                       user_data,
429                                                       len);
430                 kunmap_atomic(vaddr);
431
432                 err = pagecache_write_end(obj->base.filp, mapping,
433                                           offset, len, len - unwritten,
434                                           page, data);
435                 if (err < 0)
436                         return err;
437
438                 /* We don't handle -EFAULT, leave it to the caller to check */
439                 if (unwritten)
440                         return -ENODEV;
441
442                 remain -= len;
443                 user_data += len;
444                 offset += len;
445                 pg = 0;
446         } while (remain);
447
448         return 0;
449 }
450
451 static int
452 shmem_pread(struct drm_i915_gem_object *obj,
453             const struct drm_i915_gem_pread *arg)
454 {
455         if (!i915_gem_object_has_struct_page(obj))
456                 return i915_gem_object_pread_phys(obj, arg);
457
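        /*
         * Note: -ENODEV here is not reported to userspace; the caller is
         * expected to treat it as "no fast path available" and fall back to
         * its generic shmem read path.
         */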
458         return -ENODEV;
459 }
460
461 static void shmem_release(struct drm_i915_gem_object *obj)
462 {
463         if (i915_gem_object_has_struct_page(obj))
464                 i915_gem_object_release_memory_region(obj);
465
466         fput(obj->base.filp);
467 }
468
469 const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
470         .name = "i915_gem_object_shmem",
471         .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
472
473         .get_pages = shmem_get_pages,
474         .put_pages = shmem_put_pages,
475         .truncate = shmem_truncate,
476         .writeback = shmem_writeback,
477
478         .pwrite = shmem_pwrite,
479         .pread = shmem_pread,
480
481         .release = shmem_release,
482 };
483
484 static int __create_shmem(struct drm_i915_private *i915,
485                           struct drm_gem_object *obj,
486                           resource_size_t size)
487 {
488         unsigned long flags = VM_NORESERVE;
489         struct file *filp;
490
491         drm_gem_private_object_init(&i915->drm, obj, size);
492
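        /*
         * Prefer the driver's private tmpfs mount (i915->mm.gemfs), which
         * may be mounted with transparent huge page support (see
         * init_shmem() below), and fall back to the kernel's default shmem
         * mount otherwise. VM_NORESERVE avoids reserving swap space for the
         * whole object up front.
         */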
493         if (i915->mm.gemfs)
494                 filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
495                                                  flags);
496         else
497                 filp = shmem_file_setup("i915", size, flags);
498         if (IS_ERR(filp))
499                 return PTR_ERR(filp);
500
501         obj->filp = filp;
502         return 0;
503 }
504
505 static int shmem_object_init(struct intel_memory_region *mem,
506                              struct drm_i915_gem_object *obj,
507                              resource_size_t size,
508                              resource_size_t page_size,
509                              unsigned int flags)
510 {
511         static struct lock_class_key lock_class;
512         struct drm_i915_private *i915 = mem->i915;
513         struct address_space *mapping;
514         unsigned int cache_level;
515         gfp_t mask;
516         int ret;
517
518         ret = __create_shmem(i915, &obj->base, size);
519         if (ret)
520                 return ret;
521
522         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
523         if (IS_I965GM(i915) || IS_I965G(i915)) {
524                 /* 965gm cannot relocate objects above 4GiB. */
525                 mask &= ~__GFP_HIGHMEM;
526                 mask |= __GFP_DMA32;
527         }
528
529         mapping = obj->base.filp->f_mapping;
530         mapping_set_gfp_mask(mapping, mask);
531         GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
532
533         i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
534         obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
535         obj->write_domain = I915_GEM_DOMAIN_CPU;
536         obj->read_domains = I915_GEM_DOMAIN_CPU;
537
538         if (HAS_LLC(i915))
539                 /* On some devices, we can have the GPU use the LLC (the CPU
540                  * cache) for about a 10% performance improvement
541                  * compared to uncached.  Graphics requests other than
542                  * display scanout are coherent with the CPU in
543                  * accessing this cache.  This means in this mode we
544                  * don't need to clflush on the CPU side, and on the
545                  * GPU side we only need to flush internal caches to
546                  * get data visible to the CPU.
547                  *
548                  * However, we maintain the display planes as UC, and so
549                  * need to rebind when first used as such.
550                  */
551                 cache_level = I915_CACHE_LLC;
552         else
553                 cache_level = I915_CACHE_NONE;
554
555         i915_gem_object_set_cache_coherency(obj, cache_level);
556
557         i915_gem_object_init_memory_region(obj, mem);
558
559         return 0;
560 }
561
562 struct drm_i915_gem_object *
563 i915_gem_object_create_shmem(struct drm_i915_private *i915,
564                              resource_size_t size)
565 {
566         return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
567                                              size, 0, 0);
568 }
569
570 /* Allocate a new GEM object and fill it with the supplied data */
571 struct drm_i915_gem_object *
572 i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
573                                        const void *data, resource_size_t size)
574 {
575         struct drm_i915_gem_object *obj;
576         struct file *file;
577         resource_size_t offset;
578         int err;
579
580         GEM_WARN_ON(IS_DGFX(dev_priv));
581         obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
582         if (IS_ERR(obj))
583                 return obj;
584
585         GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
586
587         file = obj->base.filp;
588         offset = 0;
589         do {
590                 unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
591                 struct page *page;
592                 void *pgdata, *vaddr;
593
594                 err = pagecache_write_begin(file, file->f_mapping,
595                                             offset, len, 0,
596                                             &page, &pgdata);
597                 if (err < 0)
598                         goto fail;
599
600                 vaddr = kmap(page);
601                 memcpy(vaddr, data, len);
602                 kunmap(page);
603
604                 err = pagecache_write_end(file, file->f_mapping,
605                                           offset, len, len,
606                                           page, pgdata);
607                 if (err < 0)
608                         goto fail;
609
610                 size -= len;
611                 data += len;
612                 offset += len;
613         } while (size);
614
615         return obj;
616
617 fail:
618         i915_gem_object_put(obj);
619         return ERR_PTR(err);
620 }
621
622 static int init_shmem(struct intel_memory_region *mem)
623 {
624         int err;
625
626         err = i915_gemfs_init(mem->i915);
627         if (err) {
628                 DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n",
629                          err);
630         }
631
632         intel_memory_region_set_name(mem, "system");
633
634         return 0; /* Don't error, we can simply fall back to the kernel mnt */
635 }
636
637 static void release_shmem(struct intel_memory_region *mem)
638 {
639         i915_gemfs_fini(mem->i915);
640 }
641
642 static const struct intel_memory_region_ops shmem_region_ops = {
643         .init = init_shmem,
644         .release = release_shmem,
645         .init_object = shmem_object_init,
646 };
647
648 struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
649                                                  u16 type, u16 instance)
650 {
651         return intel_memory_region_create(i915, 0,
652                                           totalram_pages() << PAGE_SHIFT,
653                                           PAGE_SIZE, 0,
654                                           type, instance,
655                                           &shmem_region_ops);
656 }
657
658 bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
659 {
660         return obj->ops == &i915_gem_shmem_ops;
661 }