drivers/gpu/drm/i915/gem/i915_gem_shmem.c

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/swap.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gemfs.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move pages to the appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
        check_move_unevictable_pages(pvec);
        __pagevec_release(pvec);
        cond_resched();
}

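/*
 * Allocate the shmemfs backing pages for the object and build the
 * scatterlist describing them, coalescing physically contiguous pages
 * into larger sg entries where possible.
 */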
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct intel_memory_region *mem = obj->mm.region;
        const unsigned long page_count = obj->base.size / PAGE_SIZE;
        unsigned long i;
        struct address_space *mapping;
        struct sg_table *st;
        struct scatterlist *sg;
        struct sgt_iter sgt_iter;
        struct page *page;
        unsigned long last_pfn = 0;     /* suppress gcc warning */
        unsigned int max_segment = i915_sg_segment_size();
        unsigned int sg_page_sizes;
        gfp_t noreclaim;
        int ret;

        /*
         * Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
         * a GPU cache
         */
        GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
        GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

        /*
         * If there's no chance of allocating enough pages for the whole
         * object, bail early.
         */
        if (obj->base.size > resource_size(&mem->region))
                return -ENOMEM;

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                return -ENOMEM;

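        /*
         * Build the sg_table for the object's pages. If DMA remapping of
         * large segments fails below, we come back here and rebuild the
         * table with PAGE_SIZE segments instead.
         */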
rebuild_st:
        if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
                kfree(st);
                return -ENOMEM;
        }

        /*
         * Get the list of pages out of our struct file.  They'll be pinned
         * at this point until we release them.
         *
         * Fail silently without starting the shrinker
         */
        mapping = obj->base.filp->f_mapping;
        mapping_set_unevictable(mapping);
        noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
        noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

        sg = st->sgl;
        st->nents = 0;
        sg_page_sizes = 0;
        for (i = 0; i < page_count; i++) {
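                /*
                 * shrink[] lists the shrinker passes to try between failed
                 * allocation attempts: first reap our own bound and unbound
                 * objects; the terminating 0 marks the final attempt, made
                 * below with the mapping's full gfp mask.
                 */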
                const unsigned int shrink[] = {
                        I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
                        0,
                }, *s = shrink;
                gfp_t gfp = noreclaim;

                do {
                        cond_resched();
                        page = shmem_read_mapping_page_gfp(mapping, i, gfp);
                        if (!IS_ERR(page))
                                break;

                        if (!*s) {
                                ret = PTR_ERR(page);
                                goto err_sg;
                        }

                        i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

                        /*
                         * We've tried hard to allocate the memory by reaping
                         * our own buffer, now let the real VM do its job and
                         * go down in flames if truly OOM.
                         *
                         * However, since graphics tend to be disposable,
                         * defer the oom here by reporting the ENOMEM back
                         * to userspace.
                         */
                        if (!*s) {
                                /* reclaim and warn, but no oom */
                                gfp = mapping_gfp_mask(mapping);

                                /*
                                 * Our bo are always dirty and so we require
                                 * kswapd to reclaim our pages (direct reclaim
                                 * does not effectively begin pageout of our
                                 * buffers on its own). However, direct reclaim
                                 * only waits for kswapd when under allocation
                                 * congestion. So as a result __GFP_RECLAIM is
                                 * unreliable and fails to actually reclaim our
                                 * dirty pages -- unless you try over and over
                                 * again with !__GFP_NORETRY. However, we still
                                 * want to fail this allocation rather than
                                 * trigger the out-of-memory killer and for
                                 * this we want __GFP_RETRY_MAYFAIL.
                                 */
                                gfp |= __GFP_RETRY_MAYFAIL;
                        }
                } while (1);

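                /*
                 * Add the page to the scatterlist: extend the current entry
                 * while pages remain physically contiguous and the entry is
                 * below max_segment, otherwise start a new entry.
                 */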
                if (!i ||
                    sg->length >= max_segment ||
                    page_to_pfn(page) != last_pfn + 1) {
                        if (i) {
                                sg_page_sizes |= sg->length;
                                sg = sg_next(sg);
                        }
                        st->nents++;
                        sg_set_page(sg, page, PAGE_SIZE, 0);
                } else {
                        sg->length += PAGE_SIZE;
                }
                last_pfn = page_to_pfn(page);

                /* Check that the i965g/gm workaround works. */
                GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
        }
        if (sg) { /* loop terminated early; short sg table */
                sg_page_sizes |= sg->length;
                sg_mark_end(sg);
        }

        /* Trim unused sg entries to avoid wasting memory. */
        i915_sg_trim(st);

        ret = i915_gem_gtt_prepare_pages(obj, st);
        if (ret) {
                /*
                 * DMA remapping failed? One possible cause is that
                 * it could not reserve enough large entries, asking
                 * for PAGE_SIZE chunks instead may be helpful.
                 */
                if (max_segment > PAGE_SIZE) {
                        for_each_sgt_page(page, sgt_iter, st)
                                put_page(page);
                        sg_free_table(st);

                        max_segment = PAGE_SIZE;
                        goto rebuild_st;
                } else {
                        dev_warn(i915->drm.dev,
                                 "Failed to DMA remap %lu pages\n",
                                 page_count);
                        goto err_pages;
                }
        }

        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj, st);

        __i915_gem_object_set_pages(obj, st, sg_page_sizes);

        return 0;

err_sg:
        sg_mark_end(sg);
err_pages:
        mapping_clear_unevictable(mapping);
        if (sg != st->sgl) {
                struct pagevec pvec;

                pagevec_init(&pvec);
                for_each_sgt_page(page, sgt_iter, st) {
                        if (!pagevec_add(&pvec, page))
                                check_release_pagevec(&pvec);
                }
                if (pagevec_count(&pvec))
                        check_release_pagevec(&pvec);
        }
        sg_free_table(st);
        kfree(st);

        /*
         * shmemfs first checks if there is enough memory to allocate the page
         * and reports ENOSPC should there be insufficient, along with the usual
         * ENOMEM for a genuine allocation failure.
         *
         * We use ENOSPC in our driver to mean that we have run out of aperture
         * space and so want to translate the error from shmemfs back to our
         * usual understanding of ENOMEM.
         */
        if (ret == -ENOSPC)
                ret = -ENOMEM;

        return ret;
}

static void
shmem_truncate(struct drm_i915_gem_object *obj)
{
        /*
         * Our goal here is to return as much of the memory as possible
         * back to the system, as we are called from OOM. To do this we
         * must instruct shmemfs to drop all of its backing pages, *now*.
         */
        shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
        obj->mm.madv = __I915_MADV_PURGED;
        obj->mm.pages = ERR_PTR(-EFAULT);
}

static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
        struct address_space *mapping;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = SWAP_CLUSTER_MAX,
                .range_start = 0,
                .range_end = LLONG_MAX,
                .for_reclaim = 1,
        };
        unsigned long i;

        /*
         * Leave mmappings intact (GTT will have been revoked on unbinding,
         * leaving only CPU mmappings around) and add those pages to the LRU
         * instead of invoking writeback so they are aged and paged out
         * as normal.
         */
        mapping = obj->base.filp->f_mapping;

        /* Begin writeback on each dirty page */
        for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
                struct page *page;

                page = find_lock_page(mapping, i);
                if (!page)
                        continue;

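                /*
                 * Skip pages still mapped into userspace; for the rest,
                 * start writeback on each dirty page and tag it PG_reclaim
                 * so it can be freed as soon as the write completes.
                 */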
                if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
                        int ret;

                        SetPageReclaim(page);
                        ret = mapping->a_ops->writepage(page, &wbc);
                        if (!PageWriteback(page))
                                ClearPageReclaim(page);
                        if (!ret)
                                goto put;
                }
                unlock_page(page);
put:
                put_page(page);
        }
}

void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                struct sg_table *pages,
                                bool needs_clflush)
{
        GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

        if (obj->mm.madv == I915_MADV_DONTNEED)
                obj->mm.dirty = false;

        if (needs_clflush &&
            (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
            !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
                drm_clflush_sg(pages);

        __start_cpu_write(obj);
}

void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
        struct sgt_iter sgt_iter;
        struct pagevec pvec;
        struct page *page;

        __i915_gem_object_release_shmem(obj, pages, true);

        i915_gem_gtt_finish_pages(obj, pages);

        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_save_bit_17_swizzle(obj, pages);

        mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);

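        /*
         * Release every page back to shmemfs, transferring our dirty and
         * accessed state, batching the puts through a pagevec so the pages
         * can be moved back onto the proper lru before release.
         */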
        pagevec_init(&pvec);
        for_each_sgt_page(page, sgt_iter, pages) {
                if (obj->mm.dirty)
                        set_page_dirty(page);

                if (obj->mm.madv == I915_MADV_WILLNEED)
                        mark_page_accessed(page);

                if (!pagevec_add(&pvec, page))
                        check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                check_release_pagevec(&pvec);
        obj->mm.dirty = false;

        sg_free_table(pages);
        kfree(pages);
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
        if (likely(i915_gem_object_has_struct_page(obj)))
                i915_gem_object_put_pages_shmem(obj, pages);
        else
                i915_gem_object_put_pages_phys(obj, pages);
}

static int
shmem_pwrite(struct drm_i915_gem_object *obj,
             const struct drm_i915_gem_pwrite *arg)
{
        struct address_space *mapping = obj->base.filp->f_mapping;
        char __user *user_data = u64_to_user_ptr(arg->data_ptr);
        u64 remain, offset;
        unsigned int pg;

        /* Caller already validated user args */
        GEM_BUG_ON(!access_ok(user_data, arg->size));

        if (!i915_gem_object_has_struct_page(obj))
                return i915_gem_object_pwrite_phys(obj, arg);

        /*
         * Before we instantiate/pin the backing store for our use, we
         * can prepopulate the shmemfs filp efficiently using a write into
         * the pagecache. We avoid the penalty of instantiating all the
         * pages, important if the user is just writing to a few and never
         * uses the object on the GPU, and using a direct write into shmemfs
         * allows it to avoid the cost of retrieving a page (either swapin
         * or clearing-before-use) before it is overwritten.
         */
        if (i915_gem_object_has_pages(obj))
                return -ENODEV;

        if (obj->mm.madv != I915_MADV_WILLNEED)
                return -EFAULT;

        /*
         * Before the pages are instantiated the object is treated as being
         * in the CPU domain. The pages will be clflushed as required before
         * use, and we can freely write into the pages directly. If userspace
         * races pwrite with any other operation, corruption will ensue -
         * that is userspace's prerogative!
         */

        remain = arg->size;
        offset = arg->offset;
        pg = offset_in_page(offset);

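        /*
         * Copy the user data into the pagecache one page at a time via the
         * pagecache write helpers, letting shmemfs allocate (or swap in)
         * each page for us.
         */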
        do {
                unsigned int len, unwritten;
                struct page *page;
                void *data, *vaddr;
                int err;
                char c;

                len = PAGE_SIZE - pg;
                if (len > remain)
                        len = remain;

                /* Prefault the user page to reduce potential recursion */
                err = __get_user(c, user_data);
                if (err)
                        return err;

                err = __get_user(c, user_data + len - 1);
                if (err)
                        return err;

                err = pagecache_write_begin(obj->base.filp, mapping,
                                            offset, len, 0,
                                            &page, &data);
                if (err < 0)
                        return err;

                vaddr = kmap_atomic(page);
                unwritten = __copy_from_user_inatomic(vaddr + pg,
                                                      user_data,
                                                      len);
                kunmap_atomic(vaddr);

                err = pagecache_write_end(obj->base.filp, mapping,
                                          offset, len, len - unwritten,
                                          page, data);
                if (err < 0)
                        return err;

                /* We don't handle -EFAULT, leave it to the caller to check */
                if (unwritten)
                        return -ENODEV;

                remain -= len;
                user_data += len;
                offset += len;
                pg = 0;
        } while (remain);

        return 0;
}

static int
shmem_pread(struct drm_i915_gem_object *obj,
            const struct drm_i915_gem_pread *arg)
{
        if (!i915_gem_object_has_struct_page(obj))
                return i915_gem_object_pread_phys(obj, arg);

        return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
        if (obj->flags & I915_BO_ALLOC_STRUCT_PAGE)
                i915_gem_object_release_memory_region(obj);

        fput(obj->base.filp);
}

const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
        .name = "i915_gem_object_shmem",
        .flags = I915_GEM_OBJECT_IS_SHRINKABLE,

        .get_pages = shmem_get_pages,
        .put_pages = shmem_put_pages,
        .truncate = shmem_truncate,
        .writeback = shmem_writeback,

        .pwrite = shmem_pwrite,
        .pread = shmem_pread,

        .release = shmem_release,
};

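/*
 * Back the GEM object with a shmem file, preferring the driver's private
 * gemfs mount (set up in init_shmem()) when available; without it we fall
 * back to the kernel's tmpfs mount and lose hugepage support.
 */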
static int __create_shmem(struct drm_i915_private *i915,
                          struct drm_gem_object *obj,
                          resource_size_t size)
{
        unsigned long flags = VM_NORESERVE;
        struct file *filp;

        drm_gem_private_object_init(&i915->drm, obj, size);

        if (i915->mm.gemfs)
                filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
                                                 flags);
        else
                filp = shmem_file_setup("i915", size, flags);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;
        return 0;
}

static int shmem_object_init(struct intel_memory_region *mem,
                             struct drm_i915_gem_object *obj,
                             resource_size_t size,
                             unsigned int flags)
{
        static struct lock_class_key lock_class;
        struct drm_i915_private *i915 = mem->i915;
        struct address_space *mapping;
        unsigned int cache_level;
        gfp_t mask;
        int ret;

        ret = __create_shmem(i915, &obj->base, size);
        if (ret)
                return ret;

        mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
        if (IS_I965GM(i915) || IS_I965G(i915)) {
                /* 965gm cannot relocate objects above 4GiB. */
                mask &= ~__GFP_HIGHMEM;
                mask |= __GFP_DMA32;
        }

        mapping = obj->base.filp->f_mapping;
        mapping_set_gfp_mask(mapping, mask);
        GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

        i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class,
                             I915_BO_ALLOC_STRUCT_PAGE);

        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;

        if (HAS_LLC(i915))
                /* On some devices, we can have the GPU use the LLC (the CPU
                 * cache) for about a 10% performance improvement
                 * compared to uncached.  Graphics requests other than
                 * display scanout are coherent with the CPU in
                 * accessing this cache.  This means in this mode we
                 * don't need to clflush on the CPU side, and on the
                 * GPU side we only need to flush internal caches to
                 * get data visible to the CPU.
                 *
                 * However, we maintain the display planes as UC, and so
                 * need to rebind when first used as such.
                 */
                cache_level = I915_CACHE_LLC;
        else
                cache_level = I915_CACHE_NONE;

        i915_gem_object_set_cache_coherency(obj, cache_level);

        i915_gem_object_init_memory_region(obj, mem);

        return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
                             resource_size_t size)
{
        return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
                                             size, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
                                       const void *data, resource_size_t size)
{
        struct drm_i915_gem_object *obj;
        struct file *file;
        resource_size_t offset;
        int err;

        obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
        if (IS_ERR(obj))
                return obj;

        GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

        file = obj->base.filp;
        offset = 0;
        do {
                unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
                struct page *page;
                void *pgdata, *vaddr;

                err = pagecache_write_begin(file, file->f_mapping,
                                            offset, len, 0,
                                            &page, &pgdata);
                if (err < 0)
                        goto fail;

                vaddr = kmap(page);
                memcpy(vaddr, data, len);
                kunmap(page);

                err = pagecache_write_end(file, file->f_mapping,
                                          offset, len, len,
                                          page, pgdata);
                if (err < 0)
                        goto fail;

                size -= len;
                data += len;
                offset += len;
        } while (size);

        return obj;

fail:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}

static int init_shmem(struct intel_memory_region *mem)
{
        int err;

        err = i915_gemfs_init(mem->i915);
        if (err) {
                DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n",
                         err);
        }

        intel_memory_region_set_name(mem, "system");

        return 0; /* Don't error, we can simply fall back to the kernel mnt */
}

static void release_shmem(struct intel_memory_region *mem)
{
        i915_gemfs_fini(mem->i915);
}

static const struct intel_memory_region_ops shmem_region_ops = {
        .init = init_shmem,
        .release = release_shmem,
        .init_object = shmem_object_init,
};

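/*
 * Register system memory as an intel_memory_region: shmem-backed objects
 * live here, and the region is sized to cover all of RAM (totalram_pages()).
 */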
struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915)
{
        return intel_memory_region_create(i915, 0,
                                          totalram_pages() << PAGE_SHIFT,
                                          PAGE_SIZE, 0,
                                          &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
        return obj->ops == &i915_gem_shmem_ops;
}