/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/pagevec.h>
#include <linux/swap.h>

#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gemfs.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void check_release_pagevec(struct pagevec *pvec)
{
        check_move_unevictable_pages(pvec);
        __pagevec_release(pvec);
}
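
/*
 * shmem_get_pages() populates the backing store for a shmemfs object: each
 * page is pulled in from the shmemfs mapping, physically contiguous runs are
 * coalesced into scatterlist entries, and the result is handed to the DMA
 * layer.  Allocation starts with a non-blocking gfp mask and only escalates
 * to reclaiming the driver's own buffers, and finally full direct reclaim,
 * when that fails.
 */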
static int shmem_get_pages(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct intel_memory_region *mem = obj->mm.region;
        const unsigned long page_count = obj->base.size / PAGE_SIZE;
        unsigned long i;
        struct address_space *mapping;
        struct sg_table *st;
        struct scatterlist *sg;
        struct sgt_iter sgt_iter;
        struct page *page;
        unsigned long last_pfn = 0; /* suppress gcc warning */
        unsigned int max_segment = i915_sg_segment_size();
        unsigned int sg_page_sizes;
        gfp_t noreclaim;
        int ret;

        /*
         * Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
         * a GPU write domain.
         */
        GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
        GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

        /*
         * If there's no chance of allocating enough pages for the whole
         * object, bail early.
         */
        if (obj->base.size > resource_size(&mem->region))
                return -ENOMEM;

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st)
                return -ENOMEM;

rebuild_st:
        if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
                kfree(st);
                return -ENOMEM;
        }

        /*
         * Get the list of pages out of our struct file. They'll be pinned
         * at this point until we release them.
         *
         * Fail silently without starting the shrinker
         */
        mapping = obj->base.filp->f_mapping;
        mapping_set_unevictable(mapping);
        noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
        noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

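        /*
         * The loop below pulls the object in one page at a time and builds
         * the scatterlist as it goes.  Each page is first requested with the
         * light-weight "noreclaim" mask set up above; only when shmemfs
         * cannot satisfy that do we escalate through the shrink[] stages and,
         * as a last resort, retry with the mapping's full gfp mask plus
         * __GFP_RETRY_MAYFAIL.
         */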
        sg = st->sgl;
        st->nents = 0;
        sg_page_sizes = 0;
        for (i = 0; i < page_count; i++) {
                const unsigned int shrink[] = {
                        I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
                        0,
                }, *s = shrink;
                gfp_t gfp = noreclaim;

                do {
                        cond_resched();
                        page = shmem_read_mapping_page_gfp(mapping, i, gfp);
                        if (!IS_ERR(page))
                                break;

                        if (!*s) {
                                ret = PTR_ERR(page);
                                goto err_sg;
                        }

                        i915_gem_shrink(NULL, i915, 2 * page_count, NULL, *s++);

                        /*
                         * We've tried hard to allocate the memory by reaping
                         * our own buffer, now let the real VM do its job and
                         * go down in flames if truly OOM.
                         *
                         * However, since graphics tend to be disposable,
                         * defer the oom here by reporting the ENOMEM back
                         * to userspace.
                         */
                        if (!*s) {
                                /* reclaim and warn, but no oom */
                                gfp = mapping_gfp_mask(mapping);

                                /*
                                 * Our bo are always dirty and so we require
                                 * kswapd to reclaim our pages (direct reclaim
                                 * does not effectively begin pageout of our
                                 * buffers on its own). However, direct reclaim
                                 * only waits for kswapd when under allocation
                                 * congestion. So as a result __GFP_RECLAIM is
                                 * unreliable and fails to actually reclaim our
                                 * dirty pages -- unless you try over and over
                                 * again with !__GFP_NORETRY. However, we still
                                 * want to fail this allocation rather than
                                 * trigger the out-of-memory killer and for
                                 * this we want __GFP_RETRY_MAYFAIL.
                                 */
                                gfp |= __GFP_RETRY_MAYFAIL;
                        }
                } while (1);

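                /*
                 * Merge physically contiguous pages into the current
                 * scatterlist entry; a new entry is started whenever the run
                 * of contiguous pages breaks or the entry would exceed
                 * max_segment.
                 */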
                if (!i ||
                    sg->length >= max_segment ||
                    page_to_pfn(page) != last_pfn + 1) {
                        if (i) {
                                sg_page_sizes |= sg->length;
                                sg = sg_next(sg);
                        }
                        st->nents++;
                        sg_set_page(sg, page, PAGE_SIZE, 0);
                } else {
                        sg->length += PAGE_SIZE;
                }
                last_pfn = page_to_pfn(page);

                /* Check that the i965g/gm workaround works. */
                GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL);
        }
        if (sg) { /* loop terminated early; short sg table */
                sg_page_sizes |= sg->length;
                sg_mark_end(sg);
        }

        /* Trim unused sg entries to avoid wasting memory. */
        i915_sg_trim(st);

        ret = i915_gem_gtt_prepare_pages(obj, st);
        if (ret) {
                /*
                 * DMA remapping failed? One possible cause is that
                 * it could not reserve enough large entries, asking
                 * for PAGE_SIZE chunks instead may be helpful.
                 */
                if (max_segment > PAGE_SIZE) {
                        for_each_sgt_page(page, sgt_iter, st)
                                put_page(page);
                        sg_free_table(st);

                        max_segment = PAGE_SIZE;
                        goto rebuild_st;
                } else {
                        dev_warn(i915->drm.dev,
                                 "Failed to DMA remap %lu pages\n",
                                 page_count);
                        goto err_pages;
                }
        }

        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj, st);

        if (i915_gem_object_can_bypass_llc(obj))
                obj->cache_dirty = true;

        __i915_gem_object_set_pages(obj, st, sg_page_sizes);

        return 0;

err_sg:
        sg_mark_end(sg);
err_pages:
        mapping_clear_unevictable(mapping);
        if (sg != st->sgl) {
                struct pagevec pvec;

                pagevec_init(&pvec);
                for_each_sgt_page(page, sgt_iter, st) {
                        if (!pagevec_add(&pvec, page))
                                check_release_pagevec(&pvec);
                }
                if (pagevec_count(&pvec))
                        check_release_pagevec(&pvec);
        }
        sg_free_table(st);
        kfree(st);

        /*
         * shmemfs first checks if there is enough memory to allocate the page
         * and reports ENOSPC should there be insufficient memory, along with
         * the usual ENOMEM for a genuine allocation failure.
         *
         * We use ENOSPC in our driver to mean that we have run out of aperture
         * space and so want to translate the error from shmemfs back to our
         * usual understanding of ENOMEM.
         */
        if (ret == -ENOSPC)
                ret = -ENOMEM;

        return ret;
}
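
/*
 * Called via obj_ops->truncate when the object's backing store is being
 * purged: the pages are dropped immediately and obj->mm.pages is left
 * pointing at an error so later attempts to use the purged object fail.
 */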
static void
shmem_truncate(struct drm_i915_gem_object *obj)
{
        /*
         * Our goal here is to return as much of the memory as
         * possible back to the system as we are called from OOM.
         * To do this we must instruct the shmemfs to drop all of its
         * backing pages, *now*.
         */
        shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
        obj->mm.madv = __I915_MADV_PURGED;
        obj->mm.pages = ERR_PTR(-EFAULT);
}
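
/*
 * Push an object's dirty pages towards swap without touching any remaining
 * CPU mmaps; this backs obj_ops->writeback and is typically invoked under
 * memory pressure.
 */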
static void
shmem_writeback(struct drm_i915_gem_object *obj)
{
        struct address_space *mapping;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = SWAP_CLUSTER_MAX,
                .range_start = 0,
                .range_end = LLONG_MAX,
                .for_reclaim = 1,
        };
        unsigned long i;

        /*
         * Leave mmappings intact (GTT will have been revoked on unbinding,
         * leaving only CPU mmappings around) and add those pages to the LRU
         * instead of invoking writeback so they are aged and paged out
         * as normal.
         */
        mapping = obj->base.filp->f_mapping;

        /* Begin writeback on each dirty page */
        for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
                struct page *page;

                page = find_lock_page(mapping, i);
                if (!page)
                        continue;

                if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
                        int ret;

                        SetPageReclaim(page);
                        ret = mapping->a_ops->writepage(page, &wbc);
                        if (!PageWriteback(page))
                                ClearPageReclaim(page);
                        if (!ret)
                                goto put;
                }
                unlock_page(page);
put:
                put_page(page);
        }
}
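
/*
 * Common teardown before shmem pages are released: objects marked DONTNEED
 * stop being tracked as dirty, CPU-incoherent pages are clflushed when the
 * caller requires it, and the object is returned to the CPU write domain.
 */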
void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                struct sg_table *pages,
                                bool needs_clflush)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);

        GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

        if (obj->mm.madv == I915_MADV_DONTNEED)
                obj->mm.dirty = false;

        if (needs_clflush &&
            (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
            !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
                drm_clflush_sg(pages);

        __start_cpu_write(obj);
        /*
         * On non-LLC platforms, force the flush-on-acquire if this is ever
         * swapped-in. Our async flush path is not trustworthy enough yet (and
         * happens in the wrong order), and with some tricks it's conceivable
         * for userspace to change the cache-level to I915_CACHE_NONE after the
         * pages are swapped-in, and since execbuf binds the object before doing
         * the async flush, we have a race window.
         */
        if (!HAS_LLC(i915))
                obj->cache_dirty = true;
}
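
/*
 * Return a shmem object's pages to shmemfs: tear down the DMA mapping,
 * transfer the dirty/accessed state onto the struct pages so the VM knows
 * what needs to reach swap, and drop our page references.
 */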
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
        struct sgt_iter sgt_iter;
        struct pagevec pvec;
        struct page *page;

        GEM_WARN_ON(IS_DGFX(to_i915(obj->base.dev)));
        __i915_gem_object_release_shmem(obj, pages, true);

        i915_gem_gtt_finish_pages(obj, pages);

        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_save_bit_17_swizzle(obj, pages);

        mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);

        pagevec_init(&pvec);
        for_each_sgt_page(page, sgt_iter, pages) {
                if (obj->mm.dirty)
                        set_page_dirty(page);

                if (obj->mm.madv == I915_MADV_WILLNEED)
                        mark_page_accessed(page);

                if (!pagevec_add(&pvec, page))
                        check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                check_release_pagevec(&pvec);
        obj->mm.dirty = false;

        sg_free_table(pages);
        kfree(pages);
}

static void
shmem_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
{
        if (likely(i915_gem_object_has_struct_page(obj)))
                i915_gem_object_put_pages_shmem(obj, pages);
        else
                i915_gem_object_put_pages_phys(obj, pages);
}

static int
shmem_pwrite(struct drm_i915_gem_object *obj,
             const struct drm_i915_gem_pwrite *arg)
{
        struct address_space *mapping = obj->base.filp->f_mapping;
        char __user *user_data = u64_to_user_ptr(arg->data_ptr);
        u64 remain, offset;
        unsigned int pg;

        /* Caller already validated user args */
        GEM_BUG_ON(!access_ok(user_data, arg->size));

        if (!i915_gem_object_has_struct_page(obj))
                return i915_gem_object_pwrite_phys(obj, arg);

        /*
         * Before we instantiate/pin the backing store for our use, we
         * can prepopulate the shmemfs filp efficiently using a write into
         * the pagecache. We avoid the penalty of instantiating all the
         * pages, important if the user is just writing to a few and never
         * uses the object on the GPU, and using a direct write into shmemfs
         * allows it to avoid the cost of retrieving a page (either swapin
         * or clearing-before-use) before it is overwritten.
         */
        if (i915_gem_object_has_pages(obj))
                return -ENODEV;

        if (obj->mm.madv != I915_MADV_WILLNEED)
                return -EFAULT;

        /*
         * Before the pages are instantiated the object is treated as being
         * in the CPU domain. The pages will be clflushed as required before
         * use, and we can freely write into the pages directly. If userspace
         * races pwrite with any other operation, corruption will ensue -
         * that is userspace's prerogative!
         */

        remain = arg->size;
        offset = arg->offset;
        pg = offset_in_page(offset);

        do {
                unsigned int len, unwritten;
                struct page *page;
                void *data, *vaddr;
                int err;
                char c;

                len = PAGE_SIZE - pg;
                if (len > remain)
                        len = remain;

                /* Prefault the user page to reduce potential recursion */
                err = __get_user(c, user_data);
                if (err)
                        return err;

                err = __get_user(c, user_data + len - 1);
                if (err)
                        return err;

                err = pagecache_write_begin(obj->base.filp, mapping,
                                            offset, len, 0,
                                            &page, &data);
                if (err < 0)
                        return err;

                vaddr = kmap_atomic(page);
                unwritten = __copy_from_user_inatomic(vaddr + pg,
                                                      user_data,
                                                      len);
                kunmap_atomic(vaddr);

                err = pagecache_write_end(obj->base.filp, mapping,
                                          offset, len, len - unwritten,
                                          page, data);
                if (err < 0)
                        return err;

                /* We don't handle -EFAULT, leave it to the caller to check */
                if (unwritten)
                        return -ENODEV;

                remain -= len;
                user_data += len;
                offset += len;
                pg = 0;
        } while (remain);

        return 0;
}

static int
shmem_pread(struct drm_i915_gem_object *obj,
            const struct drm_i915_gem_pread *arg)
{
        if (!i915_gem_object_has_struct_page(obj))
                return i915_gem_object_pread_phys(obj, arg);

        return -ENODEV;
}

static void shmem_release(struct drm_i915_gem_object *obj)
{
        if (i915_gem_object_has_struct_page(obj))
                i915_gem_object_release_memory_region(obj);

        fput(obj->base.filp);
}
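
/*
 * Object-ops vtable for shmemfs-backed (system memory) objects; this is also
 * what i915_gem_object_is_shmem() below keys off.
 */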
const struct drm_i915_gem_object_ops i915_gem_shmem_ops = {
        .name = "i915_gem_object_shmem",
        .flags = I915_GEM_OBJECT_IS_SHRINKABLE,

        .get_pages = shmem_get_pages,
        .put_pages = shmem_put_pages,
        .truncate = shmem_truncate,
        .writeback = shmem_writeback,

        .pwrite = shmem_pwrite,
        .pread = shmem_pread,

        .release = shmem_release,
};

static int __create_shmem(struct drm_i915_private *i915,
                          struct drm_gem_object *obj,
                          resource_size_t size)
{
        unsigned long flags = VM_NORESERVE;
        struct file *filp;

        drm_gem_private_object_init(&i915->drm, obj, size);

        if (i915->mm.gemfs)
                filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
                                                 flags);
        else
                filp = shmem_file_setup("i915", size, flags);
        if (IS_ERR(filp))
                return PTR_ERR(filp);

        obj->filp = filp;
        return 0;
}

static int shmem_object_init(struct intel_memory_region *mem,
                             struct drm_i915_gem_object *obj,
                             resource_size_t size,
                             resource_size_t page_size,
                             unsigned int flags)
{
        static struct lock_class_key lock_class;
        struct drm_i915_private *i915 = mem->i915;
        struct address_space *mapping;
        unsigned int cache_level;
        gfp_t mask;
        int ret;

        ret = __create_shmem(i915, &obj->base, size);
        if (ret)
                return ret;

        mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
        if (IS_I965GM(i915) || IS_I965G(i915)) {
                /* 965gm cannot relocate objects above 4GiB. */
                mask &= ~__GFP_HIGHMEM;
                mask |= __GFP_DMA32;
        }

        mapping = obj->base.filp->f_mapping;
        mapping_set_gfp_mask(mapping, mask);
        GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

        i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
        obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;

        if (HAS_LLC(i915))
                /*
                 * On some devices, we can have the GPU use the LLC (the CPU
                 * cache) for about a 10% performance improvement
                 * compared to uncached. Graphics requests other than
                 * display scanout are coherent with the CPU in
                 * accessing this cache. This means in this mode we
                 * don't need to clflush on the CPU side, and on the
                 * GPU side we only need to flush internal caches to
                 * get data visible to the CPU.
                 *
                 * However, we maintain the display planes as UC, and so
                 * need to rebind when first used as such.
                 */
                cache_level = I915_CACHE_LLC;
        else
                cache_level = I915_CACHE_NONE;

        i915_gem_object_set_cache_coherency(obj, cache_level);

        i915_gem_object_init_memory_region(obj, mem);

        return 0;
}
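
/*
 * Convenience wrapper for allocating an object in system memory.  A minimal
 * usage sketch (error handling trimmed; the 64K size is only an example):
 *
 *      obj = i915_gem_object_create_shmem(i915, SZ_64K);
 *      if (IS_ERR(obj))
 *              return PTR_ERR(obj);
 */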
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
                             resource_size_t size)
{
        return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
                                             size, 0, 0);
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
                                       const void *data, resource_size_t size)
{
        struct drm_i915_gem_object *obj;
        struct file *file;
        resource_size_t offset;
        int err;

        GEM_WARN_ON(IS_DGFX(dev_priv));
        obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
        if (IS_ERR(obj))
                return obj;

        GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

        file = obj->base.filp;
        offset = 0;
        do {
                unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
                struct page *page;
                void *pgdata, *vaddr;

                err = pagecache_write_begin(file, file->f_mapping,
                                            offset, len, 0,
                                            &page, &pgdata);
                if (err < 0)
                        goto fail;

                vaddr = kmap(page);
                memcpy(vaddr, data, len);
                kunmap(page);

                err = pagecache_write_end(file, file->f_mapping,
                                          offset, len, len,
                                          page, pgdata);
                if (err < 0)
                        goto fail;

                size -= len;
                data += len;
                offset += len;
        } while (size);

        return obj;

fail:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
}
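
/*
 * Region init for the "system" memory region: try to create the driver's
 * private tmpfs mount (which enables transparent-hugepage backing) and fall
 * back to the kernel's default mount if that is not possible.
 */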
static int init_shmem(struct intel_memory_region *mem)
{
        int err;

        err = i915_gemfs_init(mem->i915);
        if (err) {
                DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n",
                         err);
        }

        intel_memory_region_set_name(mem, "system");

        return 0; /* Don't error, we can simply fallback to the kernel mnt */
}

static void release_shmem(struct intel_memory_region *mem)
{
        i915_gemfs_fini(mem->i915);
}

static const struct intel_memory_region_ops shmem_region_ops = {
        .init = init_shmem,
        .release = release_shmem,
        .init_object = shmem_object_init,
};

struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
                                                 u16 type, u16 instance)
{
        return intel_memory_region_create(i915, 0,
                                          totalram_pages() << PAGE_SHIFT,
                                          PAGE_SIZE, 0,
                                          type, instance,
                                          &shmem_region_ops);
}

bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj)
{
        return obj->ops == &i915_gem_shmem_ops;
}