2 * Copyright © 2008-2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
32 #include "i915_gem_dmabuf.h"
33 #include "i915_vgpu.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36 #include "intel_frontbuffer.h"
37 #include "intel_mocs.h"
38 #include <linux/reservation.h>
39 #include <linux/shmem_fs.h>
40 #include <linux/slab.h>
41 #include <linux/swap.h>
42 #include <linux/pci.h>
43 #include <linux/dma-buf.h>
45 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
46 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
48 static bool cpu_cache_is_coherent(struct drm_device *dev,
49 enum i915_cache_level level)
51 return HAS_LLC(dev) || level != I915_CACHE_NONE;
54 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
56 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
59 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
62 return obj->pin_display;
66 insert_mappable_node(struct drm_i915_private *i915,
67 struct drm_mm_node *node, u32 size)
69 memset(node, 0, sizeof(*node));
70 return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
72 i915->ggtt.mappable_end,
73 DRM_MM_SEARCH_DEFAULT,
74 DRM_MM_CREATE_DEFAULT);
78 remove_mappable_node(struct drm_mm_node *node)
80 drm_mm_remove_node(node);
83 /* some bookkeeping */
84 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
87 spin_lock(&dev_priv->mm.object_stat_lock);
88 dev_priv->mm.object_count++;
89 dev_priv->mm.object_memory += size;
90 spin_unlock(&dev_priv->mm.object_stat_lock);
93 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
96 spin_lock(&dev_priv->mm.object_stat_lock);
97 dev_priv->mm.object_count--;
98 dev_priv->mm.object_memory -= size;
99 spin_unlock(&dev_priv->mm.object_stat_lock);
103 i915_gem_wait_for_error(struct i915_gpu_error *error)
107 if (!i915_reset_in_progress(error))
111 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
112 * userspace. If it takes that long something really bad is going on and
113 * we should simply try to bail out and fail as gracefully as possible.
115 ret = wait_event_interruptible_timeout(error->reset_queue,
116 !i915_reset_in_progress(error),
119 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
121 } else if (ret < 0) {
128 int i915_mutex_lock_interruptible(struct drm_device *dev)
130 struct drm_i915_private *dev_priv = to_i915(dev);
133 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
137 ret = mutex_lock_interruptible(&dev->struct_mutex);
145 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
146 struct drm_file *file)
148 struct drm_i915_private *dev_priv = to_i915(dev);
149 struct i915_ggtt *ggtt = &dev_priv->ggtt;
150 struct drm_i915_gem_get_aperture *args = data;
151 struct i915_vma *vma;
155 mutex_lock(&dev->struct_mutex);
156 list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
157 if (i915_vma_is_pinned(vma))
158 pinned += vma->node.size;
159 list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
160 if (i915_vma_is_pinned(vma))
161 pinned += vma->node.size;
162 mutex_unlock(&dev->struct_mutex);
164 args->aper_size = ggtt->base.total;
165 args->aper_available_size = args->aper_size - pinned;
171 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
173 struct address_space *mapping = obj->base.filp->f_mapping;
174 char *vaddr = obj->phys_handle->vaddr;
176 struct scatterlist *sg;
179 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
182 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
186 page = shmem_read_mapping_page(mapping, i);
188 return PTR_ERR(page);
190 src = kmap_atomic(page);
191 memcpy(vaddr, src, PAGE_SIZE);
192 drm_clflush_virt_range(vaddr, PAGE_SIZE);
199 i915_gem_chipset_flush(to_i915(obj->base.dev));
201 st = kmalloc(sizeof(*st), GFP_KERNEL);
205 if (sg_alloc_table(st, 1, GFP_KERNEL)) {
212 sg->length = obj->base.size;
214 sg_dma_address(sg) = obj->phys_handle->busaddr;
215 sg_dma_len(sg) = obj->base.size;
222 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
226 BUG_ON(obj->madv == __I915_MADV_PURGED);
228 ret = i915_gem_object_set_to_cpu_domain(obj, true);
230 /* In the event of a disaster, abandon all caches and
233 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
236 if (obj->madv == I915_MADV_DONTNEED)
240 struct address_space *mapping = obj->base.filp->f_mapping;
241 char *vaddr = obj->phys_handle->vaddr;
244 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
248 page = shmem_read_mapping_page(mapping, i);
252 dst = kmap_atomic(page);
253 drm_clflush_virt_range(vaddr, PAGE_SIZE);
254 memcpy(dst, vaddr, PAGE_SIZE);
257 set_page_dirty(page);
258 if (obj->madv == I915_MADV_WILLNEED)
259 mark_page_accessed(page);
266 sg_free_table(obj->pages);
271 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
273 drm_pci_free(obj->base.dev, obj->phys_handle);
276 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
277 .get_pages = i915_gem_object_get_pages_phys,
278 .put_pages = i915_gem_object_put_pages_phys,
279 .release = i915_gem_object_release_phys,
282 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
284 struct i915_vma *vma;
285 LIST_HEAD(still_in_list);
288 lockdep_assert_held(&obj->base.dev->struct_mutex);
290 /* Closed vma are removed from the obj->vma_list - but they may
291 * still have an active binding on the object. To remove those we
292 * must wait for all rendering to complete to the object (as unbinding
293 * must anyway), and retire the requests.
295 ret = i915_gem_object_wait_rendering(obj, false);
299 i915_gem_retire_requests(to_i915(obj->base.dev));
301 while ((vma = list_first_entry_or_null(&obj->vma_list,
304 list_move_tail(&vma->obj_link, &still_in_list);
305 ret = i915_vma_unbind(vma);
309 list_splice(&still_in_list, &obj->vma_list);
315 * Ensures that all rendering to the object has completed and the object is
316 * safe to unbind from the GTT or access from the CPU.
317 * @obj: i915 gem object
318 * @readonly: waiting for just read access or read-write access
321 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
324 struct reservation_object *resv;
325 struct i915_gem_active *active;
326 unsigned long active_mask;
329 lockdep_assert_held(&obj->base.dev->struct_mutex);
332 active = obj->last_read;
333 active_mask = i915_gem_object_get_active(obj);
336 active = &obj->last_write;
339 for_each_active(active_mask, idx) {
342 ret = i915_gem_active_wait(&active[idx],
343 &obj->base.dev->struct_mutex);
348 resv = i915_gem_object_get_dmabuf_resv(obj);
352 err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
353 MAX_SCHEDULE_TIMEOUT);
361 /* A nonblocking variant of the above wait. Must be called prior to
362 * acquiring the mutex for the object, as the object state may change
363 * during this call. A reference must be held by the caller for the object.
365 static __must_check int
366 __unsafe_wait_rendering(struct drm_i915_gem_object *obj,
367 struct intel_rps_client *rps,
370 struct i915_gem_active *active;
371 unsigned long active_mask;
374 active_mask = __I915_BO_ACTIVE(obj);
379 active = obj->last_read;
382 active = &obj->last_write;
385 for_each_active(active_mask, idx) {
388 ret = i915_gem_active_wait_unlocked(&active[idx],
389 I915_WAIT_INTERRUPTIBLE,
398 static struct intel_rps_client *to_rps_client(struct drm_file *file)
400 struct drm_i915_file_private *fpriv = file->driver_priv;
406 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
409 drm_dma_handle_t *phys;
412 if (obj->phys_handle) {
413 if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
419 if (obj->madv != I915_MADV_WILLNEED)
422 if (obj->base.filp == NULL)
425 ret = i915_gem_object_unbind(obj);
429 ret = i915_gem_object_put_pages(obj);
433 /* create a new object */
434 phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
438 obj->phys_handle = phys;
439 obj->ops = &i915_gem_phys_ops;
441 return i915_gem_object_get_pages(obj);
445 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
446 struct drm_i915_gem_pwrite *args,
447 struct drm_file *file_priv)
449 struct drm_device *dev = obj->base.dev;
450 void *vaddr = obj->phys_handle->vaddr + args->offset;
451 char __user *user_data = u64_to_user_ptr(args->data_ptr);
454 /* We manually control the domain here and pretend that it
455 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
457 ret = i915_gem_object_wait_rendering(obj, false);
461 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
462 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
463 unsigned long unwritten;
465 /* The physical object once assigned is fixed for the lifetime
466 * of the obj, so we can safely drop the lock and continue
469 mutex_unlock(&dev->struct_mutex);
470 unwritten = copy_from_user(vaddr, user_data, args->size);
471 mutex_lock(&dev->struct_mutex);
478 drm_clflush_virt_range(vaddr, args->size);
479 i915_gem_chipset_flush(to_i915(dev));
482 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
486 void *i915_gem_object_alloc(struct drm_device *dev)
488 struct drm_i915_private *dev_priv = to_i915(dev);
489 return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
492 void i915_gem_object_free(struct drm_i915_gem_object *obj)
494 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
495 kmem_cache_free(dev_priv->objects, obj);
499 i915_gem_create(struct drm_file *file,
500 struct drm_device *dev,
504 struct drm_i915_gem_object *obj;
508 size = roundup(size, PAGE_SIZE);
512 /* Allocate the new object */
513 obj = i915_gem_object_create(dev, size);
517 ret = drm_gem_handle_create(file, &obj->base, &handle);
518 /* drop reference from allocate - handle holds it now */
519 i915_gem_object_put_unlocked(obj);
528 i915_gem_dumb_create(struct drm_file *file,
529 struct drm_device *dev,
530 struct drm_mode_create_dumb *args)
532 /* have to work out size/pitch and return them */
533 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
534 args->size = args->pitch * args->height;
535 return i915_gem_create(file, dev,
536 args->size, &args->handle);
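/*
 * A quick worked example of the dumb-buffer sizing above: for a
 * 1024x768 buffer at 32 bpp, pitch = ALIGN(1024 * 4, 64) = 4096 bytes
 * and size = 4096 * 768 = 3145728 bytes, i.e. 768 pages.
 */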
540 * Creates a new mm object and returns a handle to it.
541 * @dev: drm device pointer
542 * @data: ioctl data blob
543 * @file: drm file pointer
546 i915_gem_create_ioctl(struct drm_device *dev, void *data,
547 struct drm_file *file)
549 struct drm_i915_gem_create *args = data;
551 return i915_gem_create(file, dev,
552 args->size, &args->handle);
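/*
 * Illustrative userspace sketch (not part of the driver): creating a
 * buffer object through this ioctl, assuming an open DRM fd `fd` and the
 * uapi definitions from i915_drm.h:
 *
 *	struct drm_i915_gem_create create = { .size = 2 * 4096 };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		bo_handle = create.handle;	(bo_handle is a hypothetical variable)
 *
 * The requested size is rounded up to a page multiple by i915_gem_create()
 * above before the object is allocated.
 */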
556 __copy_to_user_swizzled(char __user *cpu_vaddr,
557 const char *gpu_vaddr, int gpu_offset,
560 int ret, cpu_offset = 0;
563 int cacheline_end = ALIGN(gpu_offset + 1, 64);
564 int this_length = min(cacheline_end - gpu_offset, length);
565 int swizzled_gpu_offset = gpu_offset ^ 64;
567 ret = __copy_to_user(cpu_vaddr + cpu_offset,
568 gpu_vaddr + swizzled_gpu_offset,
573 cpu_offset += this_length;
574 gpu_offset += this_length;
575 length -= this_length;
582 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
583 const char __user *cpu_vaddr,
586 int ret, cpu_offset = 0;
589 int cacheline_end = ALIGN(gpu_offset + 1, 64);
590 int this_length = min(cacheline_end - gpu_offset, length);
591 int swizzled_gpu_offset = gpu_offset ^ 64;
593 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
594 cpu_vaddr + cpu_offset,
599 cpu_offset += this_length;
600 gpu_offset += this_length;
601 length -= this_length;
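/*
 * A short worked note on the swizzled copies above (a sketch following
 * the code): with bit-17 swizzling in effect on a page, address bit 6 is
 * flipped, so the two 64-byte cachelines of every 128-byte block swap
 * places. A read of gpu_offset 0x40..0x7f therefore comes from bytes
 * 0x00..0x3f of the backing page and vice versa, which is exactly what
 * `gpu_offset ^ 64` implements one cacheline at a time.
 */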
608 * Pins the specified object's pages and synchronizes the object with
609 * GPU accesses. Sets needs_clflush to non-zero if the caller should
610 * flush the object from the CPU cache.
612 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
613 unsigned int *needs_clflush)
619 if (!i915_gem_object_has_struct_page(obj))
622 ret = i915_gem_object_wait_rendering(obj, true);
626 ret = i915_gem_object_get_pages(obj);
630 i915_gem_object_pin_pages(obj);
632 i915_gem_object_flush_gtt_write_domain(obj);
634 /* If we're not in the cpu read domain, set ourself into the gtt
635 * read domain and manually flush cachelines (if required). This
636 * optimizes for the case when the gpu will dirty the data
637 * anyway again before the next pread happens.
639 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
640 *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
643 if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
644 ret = i915_gem_object_set_to_cpu_domain(obj, false);
651 /* return with the pages pinned */
655 i915_gem_object_unpin_pages(obj);
659 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
660 unsigned int *needs_clflush)
665 if (!i915_gem_object_has_struct_page(obj))
668 ret = i915_gem_object_wait_rendering(obj, false);
672 ret = i915_gem_object_get_pages(obj);
676 i915_gem_object_pin_pages(obj);
678 i915_gem_object_flush_gtt_write_domain(obj);
680 /* If we're not in the cpu write domain, set ourself into the
681 * gtt write domain and manually flush cachelines (as required).
682 * This optimizes for the case when the gpu will use the data
683 * right away and we therefore have to clflush anyway.
685 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
686 *needs_clflush |= cpu_write_needs_clflush(obj) << 1;
688 /* Same trick applies to invalidate partially written cachelines read
691 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
692 *needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
695 if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
696 ret = i915_gem_object_set_to_cpu_domain(obj, true);
703 if ((*needs_clflush & CLFLUSH_AFTER) == 0)
704 obj->cache_dirty = true;
706 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
708 /* return with the pages pinned */
712 i915_gem_object_unpin_pages(obj);
716 /* Per-page copy function for the shmem pread fastpath.
717 * Flushes invalid cachelines before reading the target if
718 * needs_clflush is set. */
720 shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
721 char __user *user_data,
722 bool page_do_bit17_swizzling, bool needs_clflush)
727 if (unlikely(page_do_bit17_swizzling))
730 vaddr = kmap_atomic(page);
732 drm_clflush_virt_range(vaddr + shmem_page_offset,
734 ret = __copy_to_user_inatomic(user_data,
735 vaddr + shmem_page_offset,
737 kunmap_atomic(vaddr);
739 return ret ? -EFAULT : 0;
743 shmem_clflush_swizzled_range(char *addr, unsigned long length,
746 if (unlikely(swizzled)) {
747 unsigned long start = (unsigned long) addr;
748 unsigned long end = (unsigned long) addr + length;
750 /* For swizzling simply ensure that we always flush both
751 * channels. Lame, but simple and it works. Swizzled
752 * pwrite/pread is far from a hotpath - current userspace
753 * doesn't use it at all. */
754 start = round_down(start, 128);
755 end = round_up(end, 128);
757 drm_clflush_virt_range((void *)start, end - start);
759 drm_clflush_virt_range(addr, length);
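/*
 * For example, given the rounding above: flushing bytes 0x30..0x9f of a
 * swizzled page expands to the 128-byte aligned range 0x00..0xff, so both
 * 64-byte "channels" of every touched 128-byte block are written back,
 * whichever half the data actually lives in.
 */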
764 /* Only difference to the fast-path function is that this can handle bit17
765 * and uses non-atomic copy and kmap functions. */
767 shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
768 char __user *user_data,
769 bool page_do_bit17_swizzling, bool needs_clflush)
776 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
778 page_do_bit17_swizzling);
780 if (page_do_bit17_swizzling)
781 ret = __copy_to_user_swizzled(user_data,
782 vaddr, shmem_page_offset,
785 ret = __copy_to_user(user_data,
786 vaddr + shmem_page_offset,
790 return ret ? -EFAULT : 0;
793 static inline unsigned long
794 slow_user_access(struct io_mapping *mapping,
795 uint64_t page_base, int page_offset,
796 char __user *user_data,
797 unsigned long length, bool pwrite)
799 void __iomem *ioaddr;
803 ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
804 /* We can use the cpu mem copy function because this is X86. */
805 vaddr = (void __force *)ioaddr + page_offset;
807 unwritten = __copy_from_user(vaddr, user_data, length);
809 unwritten = __copy_to_user(user_data, vaddr, length);
811 io_mapping_unmap(ioaddr);
816 i915_gem_gtt_pread(struct drm_device *dev,
817 struct drm_i915_gem_object *obj, uint64_t size,
818 uint64_t data_offset, uint64_t data_ptr)
820 struct drm_i915_private *dev_priv = to_i915(dev);
821 struct i915_ggtt *ggtt = &dev_priv->ggtt;
822 struct i915_vma *vma;
823 struct drm_mm_node node;
824 char __user *user_data;
829 intel_runtime_pm_get(to_i915(dev));
830 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
832 node.start = i915_ggtt_offset(vma);
833 node.allocated = false;
834 ret = i915_vma_put_fence(vma);
841 ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
845 ret = i915_gem_object_get_pages(obj);
847 remove_mappable_node(&node);
851 i915_gem_object_pin_pages(obj);
854 ret = i915_gem_object_set_to_gtt_domain(obj, false);
858 user_data = u64_to_user_ptr(data_ptr);
860 offset = data_offset;
862 mutex_unlock(&dev->struct_mutex);
863 if (likely(!i915.prefault_disable)) {
864 ret = fault_in_pages_writeable(user_data, remain);
866 mutex_lock(&dev->struct_mutex);
872 /* Operation in this page
874 * page_base = page offset within aperture
875 * page_offset = offset within page
876 * page_length = bytes to copy for this page
878 u32 page_base = node.start;
879 unsigned page_offset = offset_in_page(offset);
880 unsigned page_length = PAGE_SIZE - page_offset;
881 page_length = remain < page_length ? remain : page_length;
882 if (node.allocated) {
884 ggtt->base.insert_page(&ggtt->base,
885 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
890 page_base += offset & PAGE_MASK;
892 /* This is a slow read/write as it tries to read from
893 * and write to user memory which may result in page
894 * faults, and so we cannot perform this under struct_mutex.
896 if (slow_user_access(&ggtt->mappable, page_base,
897 page_offset, user_data,
898 page_length, false)) {
903 remain -= page_length;
904 user_data += page_length;
905 offset += page_length;
908 mutex_lock(&dev->struct_mutex);
909 if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
910 /* The user has modified the object whilst we tried
911 * reading from it, and we now have no idea what domain
912 * the pages should be in. As we have just been touching
913 * them directly, flush everything back to the GTT
916 ret = i915_gem_object_set_to_gtt_domain(obj, false);
920 if (node.allocated) {
922 ggtt->base.clear_range(&ggtt->base,
923 node.start, node.size);
924 i915_gem_object_unpin_pages(obj);
925 remove_mappable_node(&node);
930 intel_runtime_pm_put(to_i915(dev));
935 i915_gem_shmem_pread(struct drm_device *dev,
936 struct drm_i915_gem_object *obj,
937 struct drm_i915_gem_pread *args,
938 struct drm_file *file)
940 char __user *user_data;
943 int shmem_page_offset, page_length, ret = 0;
944 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
946 int needs_clflush = 0;
947 struct sg_page_iter sg_iter;
949 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
953 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
954 user_data = u64_to_user_ptr(args->data_ptr);
955 offset = args->offset;
958 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
959 offset >> PAGE_SHIFT) {
960 struct page *page = sg_page_iter_page(&sg_iter);
965 /* Operation in this page
967 * shmem_page_offset = offset within page in shmem file
968 * page_length = bytes to copy for this page
970 shmem_page_offset = offset_in_page(offset);
971 page_length = remain;
972 if ((shmem_page_offset + page_length) > PAGE_SIZE)
973 page_length = PAGE_SIZE - shmem_page_offset;
975 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
976 (page_to_phys(page) & (1 << 17)) != 0;
978 ret = shmem_pread_fast(page, shmem_page_offset, page_length,
979 user_data, page_do_bit17_swizzling,
984 mutex_unlock(&dev->struct_mutex);
986 if (likely(!i915.prefault_disable) && !prefaulted) {
987 ret = fault_in_pages_writeable(user_data, remain);
988 /* Userspace is tricking us, but we've already clobbered
989 * its pages with the prefault and promised to write the
990 * data up to the first fault. Hence ignore any errors
991 * and just continue. */
996 ret = shmem_pread_slow(page, shmem_page_offset, page_length,
997 user_data, page_do_bit17_swizzling,
1000 mutex_lock(&dev->struct_mutex);
1006 remain -= page_length;
1007 user_data += page_length;
1008 offset += page_length;
1012 i915_gem_obj_finish_shmem_access(obj);
1018 * Reads data from the object referenced by handle.
1019 * @dev: drm device pointer
1020 * @data: ioctl data blob
1021 * @file: drm file pointer
1023 * On error, the contents of *data are undefined.
1026 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1027 struct drm_file *file)
1029 struct drm_i915_gem_pread *args = data;
1030 struct drm_i915_gem_object *obj;
1033 if (args->size == 0)
1036 if (!access_ok(VERIFY_WRITE,
1037 u64_to_user_ptr(args->data_ptr),
1041 obj = i915_gem_object_lookup(file, args->handle);
1045 /* Bounds check source. */
1046 if (args->offset > obj->base.size ||
1047 args->size > obj->base.size - args->offset) {
1052 trace_i915_gem_object_pread(obj, args->offset, args->size);
1054 ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
1058 ret = i915_mutex_lock_interruptible(dev);
1062 ret = i915_gem_shmem_pread(dev, obj, args, file);
1064 /* pread for non shmem backed objects */
1065 if (ret == -EFAULT || ret == -ENODEV)
1066 ret = i915_gem_gtt_pread(dev, obj, args->size,
1067 args->offset, args->data_ptr);
1069 i915_gem_object_put(obj);
1070 mutex_unlock(&dev->struct_mutex);
1075 i915_gem_object_put_unlocked(obj);
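/*
 * Illustrative userspace sketch (not part of the driver): reading back the
 * first page of a bo through this ioctl, assuming an open DRM fd `fd`, a
 * valid `handle` and the uapi definitions from i915_drm.h:
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *
 *	ret = ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 *
 * Depending on the object this is serviced by the shmem path or falls back
 * to the GTT path above.
 */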
1079 /* This is the fast write path which cannot handle
1080 * page faults in the source data
1084 fast_user_write(struct io_mapping *mapping,
1085 loff_t page_base, int page_offset,
1086 char __user *user_data,
1089 void __iomem *vaddr_atomic;
1091 unsigned long unwritten;
1093 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
1094 /* We can use the cpu mem copy function because this is X86. */
1095 vaddr = (void __force *)vaddr_atomic + page_offset;
1096 unwritten = __copy_from_user_inatomic_nocache(vaddr,
1098 io_mapping_unmap_atomic(vaddr_atomic);
1103 * This is the fast pwrite path, where we copy the data directly from the
1104 * user into the GTT, uncached.
1105 * @i915: i915 device private data
1106 * @obj: i915 gem object
1107 * @args: pwrite arguments structure
1108 * @file: drm file pointer
1111 i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
1112 struct drm_i915_gem_object *obj,
1113 struct drm_i915_gem_pwrite *args,
1114 struct drm_file *file)
1116 struct i915_ggtt *ggtt = &i915->ggtt;
1117 struct drm_device *dev = obj->base.dev;
1118 struct i915_vma *vma;
1119 struct drm_mm_node node;
1120 uint64_t remain, offset;
1121 char __user *user_data;
1123 bool hit_slow_path = false;
1125 if (i915_gem_object_is_tiled(obj))
1128 intel_runtime_pm_get(i915);
1129 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1130 PIN_MAPPABLE | PIN_NONBLOCK);
1132 node.start = i915_ggtt_offset(vma);
1133 node.allocated = false;
1134 ret = i915_vma_put_fence(vma);
1136 i915_vma_unpin(vma);
1141 ret = insert_mappable_node(i915, &node, PAGE_SIZE);
1145 ret = i915_gem_object_get_pages(obj);
1147 remove_mappable_node(&node);
1151 i915_gem_object_pin_pages(obj);
1154 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1158 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1161 user_data = u64_to_user_ptr(args->data_ptr);
1162 offset = args->offset;
1163 remain = args->size;
1165 /* Operation in this page
1167 * page_base = page offset within aperture
1168 * page_offset = offset within page
1169 * page_length = bytes to copy for this page
1171 u32 page_base = node.start;
1172 unsigned page_offset = offset_in_page(offset);
1173 unsigned page_length = PAGE_SIZE - page_offset;
1174 page_length = remain < page_length ? remain : page_length;
1175 if (node.allocated) {
1176 wmb(); /* flush the write before we modify the GGTT */
1177 ggtt->base.insert_page(&ggtt->base,
1178 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1179 node.start, I915_CACHE_NONE, 0);
1180 wmb(); /* flush modifications to the GGTT (insert_page) */
1182 page_base += offset & PAGE_MASK;
1184 /* If we get a fault while copying data, then (presumably) our
1185 * source page isn't available. Return the error and we'll
1186 * retry in the slow path.
1187 * If the object is non-shmem backed, we retry with the
1188 * path that handles page faults.
1190 if (fast_user_write(&ggtt->mappable, page_base,
1191 page_offset, user_data, page_length)) {
1192 hit_slow_path = true;
1193 mutex_unlock(&dev->struct_mutex);
1194 if (slow_user_access(&ggtt->mappable,
1196 page_offset, user_data,
1197 page_length, true)) {
1199 mutex_lock(&dev->struct_mutex);
1203 mutex_lock(&dev->struct_mutex);
1206 remain -= page_length;
1207 user_data += page_length;
1208 offset += page_length;
1212 if (hit_slow_path) {
1214 (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
1215 /* The user has modified the object whilst we tried
1216 * reading from it, and we now have no idea what domain
1217 * the pages should be in. As we have just been touching
1218 * them directly, flush everything back to the GTT
1221 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1225 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1227 if (node.allocated) {
1229 ggtt->base.clear_range(&ggtt->base,
1230 node.start, node.size);
1231 i915_gem_object_unpin_pages(obj);
1232 remove_mappable_node(&node);
1234 i915_vma_unpin(vma);
1237 intel_runtime_pm_put(i915);
1241 /* Per-page copy function for the shmem pwrite fastpath.
1242 * Flushes invalid cachelines before writing to the target if
1243 * needs_clflush_before is set and flushes out any written cachelines after
1244 * writing if needs_clflush is set. */
1246 shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
1247 char __user *user_data,
1248 bool page_do_bit17_swizzling,
1249 bool needs_clflush_before,
1250 bool needs_clflush_after)
1255 if (unlikely(page_do_bit17_swizzling))
1258 vaddr = kmap_atomic(page);
1259 if (needs_clflush_before)
1260 drm_clflush_virt_range(vaddr + shmem_page_offset,
1262 ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
1263 user_data, page_length);
1264 if (needs_clflush_after)
1265 drm_clflush_virt_range(vaddr + shmem_page_offset,
1267 kunmap_atomic(vaddr);
1269 return ret ? -EFAULT : 0;
1272 /* Only difference to the fast-path function is that this can handle bit17
1273 * and uses non-atomic copy and kmap functions. */
1275 shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
1276 char __user *user_data,
1277 bool page_do_bit17_swizzling,
1278 bool needs_clflush_before,
1279 bool needs_clflush_after)
1285 if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1286 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1288 page_do_bit17_swizzling);
1289 if (page_do_bit17_swizzling)
1290 ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
1294 ret = __copy_from_user(vaddr + shmem_page_offset,
1297 if (needs_clflush_after)
1298 shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
1300 page_do_bit17_swizzling);
1303 return ret ? -EFAULT : 0;
1307 i915_gem_shmem_pwrite(struct drm_device *dev,
1308 struct drm_i915_gem_object *obj,
1309 struct drm_i915_gem_pwrite *args,
1310 struct drm_file *file)
1314 char __user *user_data;
1315 int shmem_page_offset, page_length, ret = 0;
1316 int obj_do_bit17_swizzling, page_do_bit17_swizzling;
1317 int hit_slowpath = 0;
1318 unsigned int needs_clflush;
1319 struct sg_page_iter sg_iter;
1321 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1325 obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
1326 user_data = u64_to_user_ptr(args->data_ptr);
1327 offset = args->offset;
1328 remain = args->size;
1330 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
1331 offset >> PAGE_SHIFT) {
1332 struct page *page = sg_page_iter_page(&sg_iter);
1333 int partial_cacheline_write;
1338 /* Operation in this page
1340 * shmem_page_offset = offset within page in shmem file
1341 * page_length = bytes to copy for this page
1343 shmem_page_offset = offset_in_page(offset);
1345 page_length = remain;
1346 if ((shmem_page_offset + page_length) > PAGE_SIZE)
1347 page_length = PAGE_SIZE - shmem_page_offset;
1349 /* If we don't overwrite a cacheline completely we need to be
1350 * careful to have up-to-date data by first clflushing. Don't
1351 * overcomplicate things and flush the entire range being written. */
1352 partial_cacheline_write = needs_clflush & CLFLUSH_BEFORE &&
1353 ((shmem_page_offset | page_length)
1354 & (boot_cpu_data.x86_clflush_size - 1));
1356 page_do_bit17_swizzling = obj_do_bit17_swizzling &&
1357 (page_to_phys(page) & (1 << 17)) != 0;
1359 ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
1360 user_data, page_do_bit17_swizzling,
1361 partial_cacheline_write,
1362 needs_clflush & CLFLUSH_AFTER);
1367 mutex_unlock(&dev->struct_mutex);
1368 ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
1369 user_data, page_do_bit17_swizzling,
1370 partial_cacheline_write,
1371 needs_clflush & CLFLUSH_AFTER);
1373 mutex_lock(&dev->struct_mutex);
1379 remain -= page_length;
1380 user_data += page_length;
1381 offset += page_length;
1385 i915_gem_obj_finish_shmem_access(obj);
1389 * Fixup: Flush cpu caches in case we didn't flush the dirty
1390 * cachelines in-line while writing and the object moved
1391 * out of the cpu write domain while we've dropped the lock.
1393 if (!(needs_clflush & CLFLUSH_AFTER) &&
1394 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1395 if (i915_gem_clflush_object(obj, obj->pin_display))
1396 needs_clflush |= CLFLUSH_AFTER;
1400 if (needs_clflush & CLFLUSH_AFTER)
1401 i915_gem_chipset_flush(to_i915(dev));
1403 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1408 * Writes data to the object referenced by handle.
1410 * @data: ioctl data blob
1413 * On error, the contents of the buffer that were to be modified are undefined.
1416 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1417 struct drm_file *file)
1419 struct drm_i915_private *dev_priv = to_i915(dev);
1420 struct drm_i915_gem_pwrite *args = data;
1421 struct drm_i915_gem_object *obj;
1424 if (args->size == 0)
1427 if (!access_ok(VERIFY_READ,
1428 u64_to_user_ptr(args->data_ptr),
1432 if (likely(!i915.prefault_disable)) {
1433 ret = fault_in_pages_readable(u64_to_user_ptr(args->data_ptr),
1439 obj = i915_gem_object_lookup(file, args->handle);
1443 /* Bounds check destination. */
1444 if (args->offset > obj->base.size ||
1445 args->size > obj->base.size - args->offset) {
1450 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1452 ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
1456 intel_runtime_pm_get(dev_priv);
1458 ret = i915_mutex_lock_interruptible(dev);
1463 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1464 * it would end up going through the fenced access, and we'll get
1465 * different detiling behavior between reading and writing.
1466 * pread/pwrite currently are reading and writing from the CPU
1467 * perspective, requiring manual detiling by the client.
1469 if (!i915_gem_object_has_struct_page(obj) ||
1470 cpu_write_needs_clflush(obj))
1471 /* Note that the gtt paths might fail with non-page-backed user
1472 * pointers (e.g. gtt mappings when moving data between
1473 * textures). Fall back to the shmem path in that case.
1475 ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
1477 if (ret == -EFAULT || ret == -ENOSPC) {
1478 if (obj->phys_handle)
1479 ret = i915_gem_phys_pwrite(obj, args, file);
1481 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1484 i915_gem_object_put(obj);
1485 mutex_unlock(&dev->struct_mutex);
1486 intel_runtime_pm_put(dev_priv);
1491 intel_runtime_pm_put(dev_priv);
1493 i915_gem_object_put_unlocked(obj);
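/*
 * Illustrative userspace sketch (not part of the driver): the mirror-image
 * upload through this ioctl, assuming `fd`, `handle`, a source buffer
 * `buf` and i915_drm.h as in the pread example above:
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *
 *	ret = ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 *
 * The kernel then picks the GTT fast path, the phys path or the shmem path
 * as selected in i915_gem_pwrite_ioctl() above.
 */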
1497 static inline enum fb_op_origin
1498 write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1500 return (domain == I915_GEM_DOMAIN_GTT ?
1501 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
1505 * Called when user space prepares to use an object with the CPU, either
1506 * through the mmap ioctl's mapping or a GTT mapping.
1508 * @data: ioctl data blob
1512 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1513 struct drm_file *file)
1515 struct drm_i915_gem_set_domain *args = data;
1516 struct drm_i915_gem_object *obj;
1517 uint32_t read_domains = args->read_domains;
1518 uint32_t write_domain = args->write_domain;
1521 /* Only handle setting domains to types used by the CPU. */
1522 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1525 /* Having something in the write domain implies it's in the read
1526 * domain, and only that read domain. Enforce that in the request.
1528 if (write_domain != 0 && read_domains != write_domain)
1531 obj = i915_gem_object_lookup(file, args->handle);
1535 /* Try to flush the object off the GPU without holding the lock.
1536 * We will repeat the flush holding the lock in the normal manner
1537 * to catch cases where we are gazumped.
1539 ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
1543 ret = i915_mutex_lock_interruptible(dev);
1547 if (read_domains & I915_GEM_DOMAIN_GTT)
1548 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1550 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1552 if (write_domain != 0)
1553 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
1555 i915_gem_object_put(obj);
1556 mutex_unlock(&dev->struct_mutex);
1560 i915_gem_object_put_unlocked(obj);
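/*
 * Illustrative userspace sketch (not part of the driver): a typical CPU
 * access protocol around this ioctl, assuming `fd`, `handle` and the
 * domain flags from i915_drm.h:
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 *	... write through a CPU mmap of the object ...
 *	struct drm_i915_gem_sw_finish fin = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &fin);
 *
 * Using I915_GEM_DOMAIN_GTT instead prepares the object for access through
 * a GTT mmap; i915_gem_sw_finish_ioctl() below flushes scanout buffers on
 * the way out.
 */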
1565 * Called when user space has done writes to this buffer
1567 * @data: ioctl data blob
1571 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1572 struct drm_file *file)
1574 struct drm_i915_gem_sw_finish *args = data;
1575 struct drm_i915_gem_object *obj;
1578 obj = i915_gem_object_lookup(file, args->handle);
1582 /* Pinned buffers may be scanout, so flush the cache */
1583 if (READ_ONCE(obj->pin_display)) {
1584 err = i915_mutex_lock_interruptible(dev);
1586 i915_gem_object_flush_cpu_write_domain(obj);
1587 mutex_unlock(&dev->struct_mutex);
1591 i915_gem_object_put_unlocked(obj);
1596 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1599 * @data: ioctl data blob
1602 * While the mapping holds a reference on the contents of the object, it doesn't
1603 * imply a ref on the object itself.
1607 * DRM driver writers who look at this function as an example for how to do GEM
1608 * mmap support, please don't implement mmap support like here. The modern way
1609 * to implement DRM mmap support is with an mmap offset ioctl (like
1610 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1611 * That way debug tooling like valgrind will understand what's going on; hiding
1612 * the mmap call in a driver private ioctl will break that. The i915 driver only
1613 * does cpu mmaps this way because we didn't know better.
1616 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1617 struct drm_file *file)
1619 struct drm_i915_gem_mmap *args = data;
1620 struct drm_i915_gem_object *obj;
1623 if (args->flags & ~(I915_MMAP_WC))
1626 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1629 obj = i915_gem_object_lookup(file, args->handle);
1633 /* prime objects have no backing filp to GEM mmap
1636 if (!obj->base.filp) {
1637 i915_gem_object_put_unlocked(obj);
1641 addr = vm_mmap(obj->base.filp, 0, args->size,
1642 PROT_READ | PROT_WRITE, MAP_SHARED,
1644 if (args->flags & I915_MMAP_WC) {
1645 struct mm_struct *mm = current->mm;
1646 struct vm_area_struct *vma;
1648 if (down_write_killable(&mm->mmap_sem)) {
1649 i915_gem_object_put_unlocked(obj);
1652 vma = find_vma(mm, addr);
1655 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1658 up_write(&mm->mmap_sem);
1660 /* This may race, but that's ok, it only gets set */
1661 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1663 i915_gem_object_put_unlocked(obj);
1664 if (IS_ERR((void *)addr))
1667 args->addr_ptr = (uint64_t) addr;
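/*
 * Illustrative userspace sketch (not part of the driver), with the same
 * caveat as the comment above (new drivers should expose an mmap offset
 * instead); assumes `fd`, `handle` and i915_drm.h:
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = 4096,
 *		.flags = 0,	(or I915_MMAP_WC on PAT-capable CPUs)
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0)
 *		ptr = (void *)(uintptr_t)arg.addr_ptr;	(ptr is hypothetical)
 */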
1672 static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
1676 size = i915_gem_object_get_stride(obj);
1677 size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;
1679 return size >> PAGE_SHIFT;
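/*
 * For example, following the arithmetic above: a Y-tiled object with a
 * 4096-byte stride has tile rows of 4096 * 32 = 128KiB, so
 * tile_row_pages() returns 32; the same stride X-tiled gives 4096 * 8 =
 * 32KiB, i.e. 8 pages. This feeds the partial-view chunk size used by
 * i915_gem_fault() below.
 */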
1683 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1685 * A history of the GTT mmap interface:
1687 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
1688 * be aligned and suitable for fencing, and still fit into the available
1689 * mappable space left by the pinned display objects. A classic problem
1690 * we called the page-fault-of-doom where we would ping-pong between
1691 * two objects that could not fit inside the GTT and so the memcpy
1692 * would page one object in at the expense of the other between every
1695 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
1696 * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1697 * object is too large for the available space (or simply too large
1698 * for the mappable aperture!), a view is created instead and faulted
1699 * into userspace. (This view is aligned and sized appropriately for
1704 * * snoopable objects cannot be accessed via the GTT. It can cause machine
1705 * hangs on some architectures, corruption on others. An attempt to service
1706 * a GTT page fault from a snoopable object will generate a SIGBUS.
1708 * * the object must be able to fit into RAM (physical memory, though not
1709 * limited to the mappable aperture).
1714 * * a new GTT page fault will synchronize rendering from the GPU and flush
1715 * all data to system memory. Subsequent access will not be synchronized.
1717 * * all mappings are revoked on runtime device suspend.
1719 * * there are only 8, 16 or 32 fence registers to share between all users
1720 * (older machines require fence register for display and blitter access
1721 * as well). Contention of the fence registers will cause the previous users
1722 * to be unmapped and any new access will generate new page faults.
1724 * * running out of memory while servicing a fault may generate a SIGBUS,
1725 * rather than the expected SIGSEGV.
1727 int i915_gem_mmap_gtt_version(void)
1733 * i915_gem_fault - fault a page into the GTT
1734 * @area: CPU VMA in question
1737 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1738 * from userspace. The fault handler takes care of binding the object to
1739 * the GTT (if needed), allocating and programming a fence register (again,
1740 * only if needed based on whether the old reg is still valid or the object
1741 * is tiled) and inserting a new PTE into the faulting process.
1743 * Note that the faulting process may involve evicting existing objects
1744 * from the GTT and/or fence registers to make room. So performance may
1745 * suffer if the GTT working set is large or there are few fence registers
1748 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1749 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
1751 int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
1753 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
1754 struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
1755 struct drm_device *dev = obj->base.dev;
1756 struct drm_i915_private *dev_priv = to_i915(dev);
1757 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1758 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1759 struct i915_vma *vma;
1760 pgoff_t page_offset;
1764 /* We don't use vmf->pgoff since that has the fake offset */
1765 page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
1768 trace_i915_gem_object_fault(obj, page_offset, true, write);
1770 /* Try to flush the object off the GPU first without holding the lock.
1771 * Upon acquiring the lock, we will perform our sanity checks and then
1772 * repeat the flush holding the lock in the normal manner to catch cases
1773 * where we are gazumped.
1775 ret = __unsafe_wait_rendering(obj, NULL, !write);
1779 intel_runtime_pm_get(dev_priv);
1781 ret = i915_mutex_lock_interruptible(dev);
1785 /* Access to snoopable pages through the GTT is incoherent. */
1786 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1791 /* If the object is smaller than a couple of partial vma, it is
1792 * not worth only creating a single partial vma - we may as well
1793 * clear enough space for the full object.
1795 flags = PIN_MAPPABLE;
1796 if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1797 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1799 /* Now pin it into the GTT as needed */
1800 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
1802 struct i915_ggtt_view view;
1803 unsigned int chunk_size;
1805 /* Use a partial view if it is bigger than available space */
1806 chunk_size = MIN_CHUNK_PAGES;
1807 if (i915_gem_object_is_tiled(obj))
1808 chunk_size = max(chunk_size, tile_row_pages(obj));
1810 memset(&view, 0, sizeof(view));
1811 view.type = I915_GGTT_VIEW_PARTIAL;
1812 view.params.partial.offset = rounddown(page_offset, chunk_size);
1813 view.params.partial.size =
1814 min_t(unsigned int, chunk_size,
1815 vma_pages(area) - view.params.partial.offset);
1817 /* If the partial covers the entire object, just create a
1820 if (chunk_size >= obj->base.size >> PAGE_SHIFT)
1821 view.type = I915_GGTT_VIEW_NORMAL;
1823 /* Userspace is now writing through an untracked VMA, abandon
1824 * all hope that the hardware is able to track future writes.
1826 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1828 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1835 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1839 ret = i915_vma_get_fence(vma);
1843 /* Mark as being mmapped into userspace for later revocation */
1844 assert_rpm_wakelock_held(dev_priv);
1845 if (list_empty(&obj->userfault_link))
1846 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
1848 /* Finally, remap it using the new GTT offset */
1849 ret = remap_io_mapping(area,
1850 area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
1851 (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
1852 min_t(u64, vma->size, area->vm_end - area->vm_start),
1856 __i915_vma_unpin(vma);
1858 mutex_unlock(&dev->struct_mutex);
1860 intel_runtime_pm_put(dev_priv);
1865 * We eat errors when the gpu is terminally wedged to avoid
1866 * userspace unduly crashing (gl has no provisions for mmaps to
1867 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1868 * and so needs to be reported.
1870 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1871 ret = VM_FAULT_SIGBUS;
1876 * EAGAIN means the gpu is hung and we'll wait for the error
1877 * handler to reset everything when re-faulting in
1878 * i915_mutex_lock_interruptible.
1885 * EBUSY is ok: this just means that another thread
1886 * already did the job.
1888 ret = VM_FAULT_NOPAGE;
1895 ret = VM_FAULT_SIGBUS;
1898 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1899 ret = VM_FAULT_SIGBUS;
1906 * i915_gem_release_mmap - remove physical page mappings
1907 * @obj: obj in question
1909 * Preserve the reservation of the mmapping with the DRM core code, but
1910 * relinquish ownership of the pages back to the system.
1912 * It is vital that we remove the page mapping if we have mapped a tiled
1913 * object through the GTT and then lose the fence register due to
1914 * resource pressure. Similarly if the object has been moved out of the
1915 * aperture, then pages mapped into userspace must be revoked. Removing the
1916 * mapping will then trigger a page fault on the next user access, allowing
1917 * fixup by i915_gem_fault().
1920 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1922 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1924 /* Serialisation between user GTT access and our code depends upon
1925 * revoking the CPU's PTE whilst the mutex is held. The next user
1926 * pagefault then has to wait until we release the mutex.
1928 * Note that RPM complicates somewhat by adding an additional
1929 * requirement that operations to the GGTT be made holding the RPM
1932 lockdep_assert_held(&i915->drm.struct_mutex);
1933 intel_runtime_pm_get(i915);
1935 if (list_empty(&obj->userfault_link))
1938 list_del_init(&obj->userfault_link);
1939 drm_vma_node_unmap(&obj->base.vma_node,
1940 obj->base.dev->anon_inode->i_mapping);
1942 /* Ensure that the CPU's PTEs are revoked and there are no outstanding
1943 * memory transactions from userspace before we return. The TLB
1944 * flushing implied by changing the PTE above *should* be
1945 * sufficient; an extra barrier here just provides us with a bit
1946 * of paranoid documentation about our requirement to serialise
1947 * memory writes before touching registers / GSM.
1952 intel_runtime_pm_put(i915);
1955 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
1957 struct drm_i915_gem_object *obj, *on;
1961 * Only called during RPM suspend. All users of the userfault_list
1962 * must be holding an RPM wakeref to ensure that this can not
1963 * run concurrently with themselves (and use the struct_mutex for
1964 * protection between themselves).
1967 list_for_each_entry_safe(obj, on,
1968 &dev_priv->mm.userfault_list, userfault_link) {
1969 list_del_init(&obj->userfault_link);
1970 drm_vma_node_unmap(&obj->base.vma_node,
1971 obj->base.dev->anon_inode->i_mapping);
1974 /* The fence will be lost when the device powers down. If any were
1975 * in use by hardware (i.e. they are pinned), we should not be powering
1976 * down! All other fences will be reacquired by the user upon waking.
1978 for (i = 0; i < dev_priv->num_fence_regs; i++) {
1979 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1981 if (WARN_ON(reg->pin_count))
1987 GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
1993 * i915_gem_get_ggtt_size - return required global GTT size for an object
1994 * @dev_priv: i915 device
1995 * @size: object size
1996 * @tiling_mode: tiling mode
1998 * Return the required global GTT size for an object, taking into account
1999 * potential fence register mapping.
2001 u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
2002 u64 size, int tiling_mode)
2006 GEM_BUG_ON(size == 0);
2008 if (INTEL_GEN(dev_priv) >= 4 ||
2009 tiling_mode == I915_TILING_NONE)
2012 /* Previous chips need a power-of-two fence region when tiling */
2013 if (IS_GEN3(dev_priv))
2014 ggtt_size = 1024*1024;
2016 ggtt_size = 512*1024;
2018 while (ggtt_size < size)
2025 * i915_gem_get_ggtt_alignment - return required global GTT alignment
2026 * @dev_priv: i915 device
2027 * @size: object size
2028 * @tiling_mode: tiling mode
2029 * @fenced: is fenced alignment required or not
2031 * Return the required global GTT alignment for an object, taking into account
2032 * potential fence register mapping.
2034 u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
2035 int tiling_mode, bool fenced)
2037 GEM_BUG_ON(size == 0);
2040 * Minimum alignment is 4k (GTT page size), but might be greater
2041 * if a fence register is needed for the object.
2043 if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
2044 tiling_mode == I915_TILING_NONE)
2048 * Previous chips need to be aligned to the size of the smallest
2049 * fence register that can contain the object.
2051 return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
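/*
 * Worked example for the two helpers above (a sketch following the code):
 * on gen3 a 700KiB X-tiled object starts from the 1MiB minimum fence size
 * and needs no doubling, so both the GGTT size and the fenced alignment
 * come out as 1MiB; a 1.5MiB tiled object doubles once to 2MiB. On gen4+,
 * or for untiled objects, the object size and a 4096-byte alignment are
 * used as-is.
 */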
2054 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2056 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2059 err = drm_gem_create_mmap_offset(&obj->base);
2063 /* We can idle the GPU locklessly to flush stale objects, but in order
2064 * to claim that space for ourselves, we need to take the big
2065 * struct_mutex to free the requests+objects and allocate our slot.
2067 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
2071 err = i915_mutex_lock_interruptible(&dev_priv->drm);
2073 i915_gem_retire_requests(dev_priv);
2074 err = drm_gem_create_mmap_offset(&obj->base);
2075 mutex_unlock(&dev_priv->drm.struct_mutex);
2081 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2083 drm_gem_free_mmap_offset(&obj->base);
2087 i915_gem_mmap_gtt(struct drm_file *file,
2088 struct drm_device *dev,
2092 struct drm_i915_gem_object *obj;
2095 obj = i915_gem_object_lookup(file, handle);
2099 ret = i915_gem_object_create_mmap_offset(obj);
2101 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2103 i915_gem_object_put_unlocked(obj);
2108 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2110 * @data: GTT mapping ioctl data
2111 * @file: GEM object info
2113 * Simply returns the fake offset to userspace so it can mmap it.
2114 * The mmap call will end up in drm_gem_mmap(), which will set things
2115 * up so we can get faults in the handler above.
2117 * The fault handler will take care of binding the object into the GTT
2118 * (since it may have been evicted to make room for something), allocating
2119 * a fence register, and mapping the appropriate aperture address into
2123 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2124 struct drm_file *file)
2126 struct drm_i915_gem_mmap_gtt *args = data;
2128 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
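/*
 * Illustrative userspace sketch (not part of the driver): the two-step GTT
 * mapping flow described above, assuming `fd`, `handle`, the object size
 * in `obj_size` (hypothetical) and i915_drm.h:
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * The first access through `ptr` then faults into i915_gem_fault() above.
 */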
2131 /* Immediately discard the backing storage */
2133 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2135 i915_gem_object_free_mmap_offset(obj);
2137 if (obj->base.filp == NULL)
2140 /* Our goal here is to return as much of the memory as
2141 * possible back to the system as we are called from OOM.
2142 * To do this we must instruct the shmfs to drop all of its
2143 * backing pages, *now*.
2145 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2146 obj->madv = __I915_MADV_PURGED;
2149 /* Try to discard unwanted pages */
2151 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2153 struct address_space *mapping;
2155 switch (obj->madv) {
2156 case I915_MADV_DONTNEED:
2157 i915_gem_object_truncate(obj);
2158 case __I915_MADV_PURGED:
2162 if (obj->base.filp == NULL)
2165 mapping = obj->base.filp->f_mapping;
2166 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2170 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2172 struct sgt_iter sgt_iter;
2176 BUG_ON(obj->madv == __I915_MADV_PURGED);
2178 ret = i915_gem_object_set_to_cpu_domain(obj, true);
2180 /* In the event of a disaster, abandon all caches and
2181 * hope for the best.
2183 i915_gem_clflush_object(obj, true);
2184 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2187 i915_gem_gtt_finish_object(obj);
2189 if (i915_gem_object_needs_bit17_swizzle(obj))
2190 i915_gem_object_save_bit_17_swizzle(obj);
2192 if (obj->madv == I915_MADV_DONTNEED)
2195 for_each_sgt_page(page, sgt_iter, obj->pages) {
2197 set_page_dirty(page);
2199 if (obj->madv == I915_MADV_WILLNEED)
2200 mark_page_accessed(page);
2206 sg_free_table(obj->pages);
2211 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2213 const struct drm_i915_gem_object_ops *ops = obj->ops;
2215 if (obj->pages == NULL)
2218 if (obj->pages_pin_count)
2221 GEM_BUG_ON(obj->bind_count);
2223 /* ->put_pages might need to allocate memory for the bit17 swizzle
2224 * array, hence protect them from being reaped by removing them from gtt
2226 list_del(&obj->global_list);
2231 ptr = ptr_mask_bits(obj->mapping);
2232 if (is_vmalloc_addr(ptr))
2235 kunmap(kmap_to_page(ptr));
2237 obj->mapping = NULL;
2240 ops->put_pages(obj);
2243 i915_gem_object_invalidate(obj);
2248 static unsigned int swiotlb_max_size(void)
2250 #if IS_ENABLED(CONFIG_SWIOTLB)
2251 return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
2258 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2260 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2262 struct address_space *mapping;
2263 struct sg_table *st;
2264 struct scatterlist *sg;
2265 struct sgt_iter sgt_iter;
2267 unsigned long last_pfn = 0; /* suppress gcc warning */
2268 unsigned int max_segment;
2272 /* Assert that the object is not currently in any GPU domain. As it
2273 * wasn't in the GTT, there shouldn't be any way it could have been in
2276 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2277 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2279 max_segment = swiotlb_max_size();
2281 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
2283 st = kmalloc(sizeof(*st), GFP_KERNEL);
2287 page_count = obj->base.size / PAGE_SIZE;
2288 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2293 /* Get the list of pages out of our struct file. They'll be pinned
2294 * at this point until we release them.
2296 * Fail silently without starting the shrinker
2298 mapping = obj->base.filp->f_mapping;
2299 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2300 gfp |= __GFP_NORETRY | __GFP_NOWARN;
2303 for (i = 0; i < page_count; i++) {
2304 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2306 i915_gem_shrink(dev_priv,
2309 I915_SHRINK_UNBOUND |
2310 I915_SHRINK_PURGEABLE);
2311 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2314 /* We've tried hard to allocate the memory by reaping
2315 * our own buffer, now let the real VM do its job and
2316 * go down in flames if truly OOM.
2318 page = shmem_read_mapping_page(mapping, i);
2320 ret = PTR_ERR(page);
2325 sg->length >= max_segment ||
2326 page_to_pfn(page) != last_pfn + 1) {
2330 sg_set_page(sg, page, PAGE_SIZE, 0);
2332 sg->length += PAGE_SIZE;
2334 last_pfn = page_to_pfn(page);
2336 /* Check that the i965g/gm workaround works. */
2337 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2339 if (sg) /* loop terminated early; short sg table */
2343 ret = i915_gem_gtt_prepare_object(obj);
2347 if (i915_gem_object_needs_bit17_swizzle(obj))
2348 i915_gem_object_do_bit_17_swizzle(obj);
2350 if (i915_gem_object_is_tiled(obj) &&
2351 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2352 i915_gem_object_pin_pages(obj);
2358 for_each_sgt_page(page, sgt_iter, st)
2363 /* shmemfs first checks if there is enough memory to allocate the page
2364 * and reports ENOSPC should there be insufficient, along with the usual
2365 * ENOMEM for a genuine allocation failure.
2367 * We use ENOSPC in our driver to mean that we have run out of aperture
2368 * space and so want to translate the error from shmemfs back to our
2369 * usual understanding of ENOMEM.
2377 /* Ensure that the associated pages are gathered from the backing storage
2378 * and pinned into our object. i915_gem_object_get_pages() may be called
2379 * multiple times before they are released by a single call to
2380 * i915_gem_object_put_pages() - once the pages are no longer referenced
2381 * either as a result of memory pressure (reaping pages under the shrinker)
2382 * or as the object is itself released.
2385 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2387 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2388 const struct drm_i915_gem_object_ops *ops = obj->ops;
2394 if (obj->madv != I915_MADV_WILLNEED) {
2395 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2399 BUG_ON(obj->pages_pin_count);
2401 ret = ops->get_pages(obj);
2405 list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2407 obj->get_page.sg = obj->pages->sgl;
2408 obj->get_page.last = 0;
2413 /* The 'mapping' part of i915_gem_object_pin_map() below */
2414 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2415 enum i915_map_type type)
2417 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2418 struct sg_table *sgt = obj->pages;
2419 struct sgt_iter sgt_iter;
2421 struct page *stack_pages[32];
2422 struct page **pages = stack_pages;
2423 unsigned long i = 0;
2427 /* A single page can always be kmapped */
2428 if (n_pages == 1 && type == I915_MAP_WB)
2429 return kmap(sg_page(sgt->sgl));
2431 if (n_pages > ARRAY_SIZE(stack_pages)) {
2432 /* Too big for stack -- allocate temporary array instead */
2433 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2438 for_each_sgt_page(page, sgt_iter, sgt)
2439 pages[i++] = page;
2441 /* Check that we have the expected number of pages */
2442 GEM_BUG_ON(i != n_pages);
2444 switch (type) {
2445 case I915_MAP_WB:
2446 pgprot = PAGE_KERNEL;
2447 break;
2448 case I915_MAP_WC:
2449 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2450 break;
2451 }
2452 addr = vmap(pages, n_pages, 0, pgprot);
2454 if (pages != stack_pages)
2455 drm_free_large(pages);
2460 /* get, pin, and map the pages of the object into kernel space */
2461 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2462 enum i915_map_type type)
2464 enum i915_map_type has_type;
2469 lockdep_assert_held(&obj->base.dev->struct_mutex);
2470 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
2472 ret = i915_gem_object_get_pages(obj);
2474 return ERR_PTR(ret);
2476 i915_gem_object_pin_pages(obj);
2477 pinned = obj->pages_pin_count > 1;
2479 ptr = ptr_unpack_bits(obj->mapping, has_type);
2480 if (ptr && has_type != type) {
2486 if (is_vmalloc_addr(ptr))
2487 vunmap(ptr);
2488 else
2489 kunmap(kmap_to_page(ptr));
2491 ptr = obj->mapping = NULL;
2495 ptr = i915_gem_object_map(obj, type);
2501 obj->mapping = ptr_pack_bits(ptr, type);
2507 i915_gem_object_unpin_pages(obj);
2508 return ERR_PTR(ret);
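/*
 * Hedged usage sketch (illustrative only): pin a linear write-back mapping
 * of an object, fill it, and release the pin. Assumes struct_mutex is held,
 * as asserted by i915_gem_object_pin_map() above, and that the matching
 * i915_gem_object_unpin_map() helper from i915_drv.h releases the pin.
 */
static int __maybe_unused example_fill_object(struct drm_i915_gem_object *obj,
					      u32 value)
{
	u32 *vaddr;
	unsigned long i;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	for (i = 0; i < obj->base.size / sizeof(*vaddr); i++)
		vaddr[i] = value;

	i915_gem_object_unpin_map(obj);
	return 0;
}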
2512 i915_gem_object_retire__write(struct i915_gem_active *active,
2513 struct drm_i915_gem_request *request)
2515 struct drm_i915_gem_object *obj =
2516 container_of(active, struct drm_i915_gem_object, last_write);
2518 intel_fb_obj_flush(obj, true, ORIGIN_CS);
2522 i915_gem_object_retire__read(struct i915_gem_active *active,
2523 struct drm_i915_gem_request *request)
2525 int idx = request->engine->id;
2526 struct drm_i915_gem_object *obj =
2527 container_of(active, struct drm_i915_gem_object, last_read[idx]);
2529 GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
2531 i915_gem_object_clear_active(obj, idx);
2532 if (i915_gem_object_is_active(obj))
2535 /* Bump our place on the bound list to keep it roughly in LRU order
2536 * so that we don't steal from recently used but inactive objects
2537 * (unless we are forced to, of course!)
2539 if (obj->bind_count)
2540 list_move_tail(&obj->global_list,
2541 &request->i915->mm.bound_list);
2543 i915_gem_object_put(obj);
2546 static bool i915_context_is_banned(const struct i915_gem_context *ctx)
2548 unsigned long elapsed;
2550 if (ctx->hang_stats.banned)
2553 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2554 if (ctx->hang_stats.ban_period_seconds &&
2555 elapsed <= ctx->hang_stats.ban_period_seconds) {
2556 DRM_DEBUG("context hanging too fast, banning!\n");
2563 static void i915_set_reset_status(struct i915_gem_context *ctx,
2566 struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
2569 hs->banned = i915_context_is_banned(ctx);
2571 hs->guilty_ts = get_seconds();
2573 hs->batch_pending++;
2577 struct drm_i915_gem_request *
2578 i915_gem_find_active_request(struct intel_engine_cs *engine)
2580 struct drm_i915_gem_request *request;
2582 /* We are called by the error capture and reset at a random
2583 * point in time. In particular, note that neither is crucially
2584 * ordered with an interrupt. After a hang, the GPU is dead and we
2585 * assume that no more writes can happen (we waited long enough for
2586 * all writes that were in flight to be flushed) - adding an
2587 * extra delay for a recent interrupt is pointless. Hence, we do
2588 * not need an engine->irq_seqno_barrier() before the seqno reads.
2590 list_for_each_entry(request, &engine->request_list, link) {
2591 if (i915_gem_request_completed(request))
2594 if (!i915_sw_fence_done(&request->submit))
2603 static void reset_request(struct drm_i915_gem_request *request)
2605 void *vaddr = request->ring->vaddr;
2608 /* As this request likely depends on state from the lost
2609 * context, clear out all the user operations leaving the
2610 * breadcrumb at the end (so we get the fence notifications).
2612 head = request->head;
2613 if (request->postfix < head) {
2614 memset(vaddr + head, 0, request->ring->size - head);
2615 head = 0;
2616 }
2617 memset(vaddr + head, 0, request->postfix - head);
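/*
 * Worked example (hypothetical numbers): with a 4096 byte ring, head == 3840
 * and postfix == 256, the first memset clears [3840, 4096), head is reset to
 * 0, and the second memset clears [0, 256), leaving only the breadcrumb that
 * follows the postfix intact.
 */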
2620 static void i915_gem_reset_engine(struct intel_engine_cs *engine)
2622 struct drm_i915_gem_request *request;
2623 struct i915_gem_context *incomplete_ctx;
2626 if (engine->irq_seqno_barrier)
2627 engine->irq_seqno_barrier(engine);
2629 request = i915_gem_find_active_request(engine);
2633 ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2634 if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
2637 i915_set_reset_status(request->ctx, ring_hung);
2641 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
2642 engine->name, request->fence.seqno);
2644 /* Setup the CS to resume from the breadcrumb of the hung request */
2645 engine->reset_hw(engine, request);
2647 /* Users of the default context do not rely on logical state
2648 * preserved between batches. They have to emit full state on
2649 * every batch and so it is safe to execute queued requests following
2652 * Other contexts preserve state, now corrupt. We want to skip all
2653 * queued requests that reference the corrupt context.
2655 incomplete_ctx = request->ctx;
2656 if (i915_gem_context_is_default(incomplete_ctx))
2659 list_for_each_entry_continue(request, &engine->request_list, link)
2660 if (request->ctx == incomplete_ctx)
2661 reset_request(request);
2664 void i915_gem_reset(struct drm_i915_private *dev_priv)
2666 struct intel_engine_cs *engine;
2667 enum intel_engine_id id;
2669 i915_gem_retire_requests(dev_priv);
2671 for_each_engine(engine, dev_priv, id)
2672 i915_gem_reset_engine(engine);
2674 i915_gem_restore_fences(&dev_priv->drm);
2676 if (dev_priv->gt.awake) {
2677 intel_sanitize_gt_powersave(dev_priv);
2678 intel_enable_gt_powersave(dev_priv);
2679 if (INTEL_GEN(dev_priv) >= 6)
2680 gen6_rps_busy(dev_priv);
2684 static void nop_submit_request(struct drm_i915_gem_request *request)
2688 static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
2690 engine->submit_request = nop_submit_request;
2692 /* Mark all pending requests as complete so that any concurrent
2693 * (lockless) lookup doesn't try and wait upon the request as we
2696 intel_engine_init_seqno(engine, engine->last_submitted_seqno);
2699 * Clear the execlists queue up before freeing the requests, as those
2700 * are the ones that keep the context and ringbuffer backing objects
2704 if (i915.enable_execlists) {
2705 spin_lock(&engine->execlist_lock);
2706 INIT_LIST_HEAD(&engine->execlist_queue);
2707 i915_gem_request_put(engine->execlist_port[0].request);
2708 i915_gem_request_put(engine->execlist_port[1].request);
2709 memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
2710 spin_unlock(&engine->execlist_lock);
2713 engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
2716 void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
2718 struct intel_engine_cs *engine;
2719 enum intel_engine_id id;
2721 lockdep_assert_held(&dev_priv->drm.struct_mutex);
2722 set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
2724 i915_gem_context_lost(dev_priv);
2725 for_each_engine(engine, dev_priv, id)
2726 i915_gem_cleanup_engine(engine);
2727 mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2729 i915_gem_retire_requests(dev_priv);
2733 i915_gem_retire_work_handler(struct work_struct *work)
2735 struct drm_i915_private *dev_priv =
2736 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2737 struct drm_device *dev = &dev_priv->drm;
2739 /* Come back later if the device is busy... */
2740 if (mutex_trylock(&dev->struct_mutex)) {
2741 i915_gem_retire_requests(dev_priv);
2742 mutex_unlock(&dev->struct_mutex);
2745 /* Keep the retire handler running until we are finally idle.
2746 * We do not need to do this test under locking as in the worst-case
2747 * we queue the retire worker once too often.
2749 if (READ_ONCE(dev_priv->gt.awake)) {
2750 i915_queue_hangcheck(dev_priv);
2751 queue_delayed_work(dev_priv->wq,
2752 &dev_priv->gt.retire_work,
2753 round_jiffies_up_relative(HZ));
2758 i915_gem_idle_work_handler(struct work_struct *work)
2760 struct drm_i915_private *dev_priv =
2761 container_of(work, typeof(*dev_priv), gt.idle_work.work);
2762 struct drm_device *dev = &dev_priv->drm;
2763 struct intel_engine_cs *engine;
2764 enum intel_engine_id id;
2765 bool rearm_hangcheck;
2767 if (!READ_ONCE(dev_priv->gt.awake))
2770 if (READ_ONCE(dev_priv->gt.active_engines))
2774 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2776 if (!mutex_trylock(&dev->struct_mutex)) {
2777 /* Currently busy, come back later */
2778 mod_delayed_work(dev_priv->wq,
2779 &dev_priv->gt.idle_work,
2780 msecs_to_jiffies(50));
2784 if (dev_priv->gt.active_engines)
2787 for_each_engine(engine, dev_priv, id)
2788 i915_gem_batch_pool_fini(&engine->batch_pool);
2790 GEM_BUG_ON(!dev_priv->gt.awake);
2791 dev_priv->gt.awake = false;
2792 rearm_hangcheck = false;
2794 if (INTEL_GEN(dev_priv) >= 6)
2795 gen6_rps_idle(dev_priv);
2796 intel_runtime_pm_put(dev_priv);
2798 mutex_unlock(&dev->struct_mutex);
2801 if (rearm_hangcheck) {
2802 GEM_BUG_ON(!dev_priv->gt.awake);
2803 i915_queue_hangcheck(dev_priv);
2807 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2809 struct drm_i915_gem_object *obj = to_intel_bo(gem);
2810 struct drm_i915_file_private *fpriv = file->driver_priv;
2811 struct i915_vma *vma, *vn;
2813 mutex_lock(&obj->base.dev->struct_mutex);
2814 list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2815 if (vma->vm->file == fpriv)
2816 i915_vma_close(vma);
2817 mutex_unlock(&obj->base.dev->struct_mutex);
2821 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2822 * @dev: drm device pointer
2823 * @data: ioctl data blob
2824 * @file: drm file pointer
2826 * Returns 0 if successful, else an error is returned with the remaining time in
2827 * the timeout parameter.
2828 * -ETIME: object is still busy after timeout
2829 * -ERESTARTSYS: signal interrupted the wait
2830 * -ENOENT: object doesn't exist
2831 * Also possible, but rare:
2832 * -EAGAIN: GPU wedged
2834 * -ENODEV: Internal IRQ fail
2835 * -E?: The add request failed
2837 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2838 * non-zero timeout parameter the wait ioctl will wait for the given number of
2839 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2840 * without holding struct_mutex the object may become re-busied before this
2841 * function completes. A similar but shorter race condition exists in the busy
2845 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2847 struct drm_i915_gem_wait *args = data;
2848 struct intel_rps_client *rps = to_rps_client(file);
2849 struct drm_i915_gem_object *obj;
2850 unsigned long active;
2853 if (args->flags != 0)
2856 obj = i915_gem_object_lookup(file, args->bo_handle);
2860 active = __I915_BO_ACTIVE(obj);
2861 for_each_active(active, idx) {
2862 s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
2863 ret = i915_gem_active_wait_unlocked(&obj->last_read[idx],
2864 I915_WAIT_INTERRUPTIBLE,
2870 i915_gem_object_put_unlocked(obj);
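/*
 * Hedged userspace-side sketch of this ABI (not kernel code; drmIoctl() is
 * the libdrm wrapper and struct drm_i915_gem_wait comes from i915_drm.h,
 * with fd and handle standing in for a caller's own values). A timeout of
 * zero turns the call into a pure busy query:
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.flags = 0,
 *		.timeout_ns = 0,
 *	};
 *	busy = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) && errno == ETIME;
 */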
2874 static void __i915_vma_iounmap(struct i915_vma *vma)
2876 GEM_BUG_ON(i915_vma_is_pinned(vma));
2878 if (vma->iomap == NULL)
2881 io_mapping_unmap(vma->iomap);
2885 int i915_vma_unbind(struct i915_vma *vma)
2887 struct drm_i915_gem_object *obj = vma->obj;
2888 unsigned long active;
2891 /* First wait upon any activity as retiring the request may
2892 * have side-effects such as unpinning or even unbinding this vma.
2894 active = i915_vma_get_active(vma);
2898 /* When a closed VMA is retired, it is unbound - eek.
2899 * In order to prevent it from being recursively closed,
2900 * take a pin on the vma so that the second unbind is
2903 __i915_vma_pin(vma);
2905 for_each_active(active, idx) {
2906 ret = i915_gem_active_retire(&vma->last_read[idx],
2907 &vma->vm->dev->struct_mutex);
2912 __i915_vma_unpin(vma);
2916 GEM_BUG_ON(i915_vma_is_active(vma));
2919 if (i915_vma_is_pinned(vma))
2922 if (!drm_mm_node_allocated(&vma->node))
2925 GEM_BUG_ON(obj->bind_count == 0);
2926 GEM_BUG_ON(!obj->pages);
2928 if (i915_vma_is_map_and_fenceable(vma)) {
2929 /* release the fence reg _after_ flushing */
2930 ret = i915_vma_put_fence(vma);
2934 /* Force a pagefault for domain tracking on next user access */
2935 i915_gem_release_mmap(obj);
2937 __i915_vma_iounmap(vma);
2938 vma->flags &= ~I915_VMA_CAN_FENCE;
2941 if (likely(!vma->vm->closed)) {
2942 trace_i915_vma_unbind(vma);
2943 vma->vm->unbind_vma(vma);
2945 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
2947 drm_mm_remove_node(&vma->node);
2948 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
2950 if (vma->pages != obj->pages) {
2951 GEM_BUG_ON(!vma->pages);
2952 sg_free_table(vma->pages);
2957 /* Since the unbound list is global, only move to that list if
2958 * no more VMAs exist. */
2959 if (--obj->bind_count == 0)
2960 list_move_tail(&obj->global_list,
2961 &to_i915(obj->base.dev)->mm.unbound_list);
2963 /* And finally now the object is completely decoupled from this vma,
2964 * we can drop its hold on the backing storage and allow it to be
2965 * reaped by the shrinker.
2967 i915_gem_object_unpin_pages(obj);
2970 if (unlikely(i915_vma_is_closed(vma)))
2971 i915_vma_destroy(vma);
2976 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
2979 struct intel_engine_cs *engine;
2980 enum intel_engine_id id;
2983 for_each_engine(engine, dev_priv, id) {
2984 if (engine->last_context == NULL)
2987 ret = intel_engine_idle(engine, flags);
2995 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
2996 unsigned long cache_level)
2998 struct drm_mm_node *gtt_space = &vma->node;
2999 struct drm_mm_node *other;
3002 * On some machines we have to be careful when putting differing types
3003 * of snoopable memory together to avoid the prefetcher crossing memory
3004 * domains and dying. During vm initialisation, we decide whether or not
3005 * these constraints apply and set the drm_mm.color_adjust
3008 if (vma->vm->mm.color_adjust == NULL)
3011 if (!drm_mm_node_allocated(gtt_space))
3014 if (list_empty(&gtt_space->node_list))
3017 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3018 if (other->allocated && !other->hole_follows && other->color != cache_level)
3021 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3022 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
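/*
 * Example of the constraint this enforces: an uncached node placed
 * immediately after a snooped neighbour, with no hole between them, is
 * rejected, so insertion and eviction must leave a guard hole between
 * differing cache domains on the machines that set color_adjust.
 */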
3029 * i915_vma_insert - finds a slot for the vma in its address space
3031 * @size: requested size in bytes (can be larger than the VMA)
3032 * @alignment: required alignment
3033 * @flags: mask of PIN_* flags to use
3035 * First we try to allocate some free space that meets the requirements for
3036 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
3037 * preferably the oldest idle entry to make room for the new VMA.
3040 * 0 on success, negative error code otherwise.
3043 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
3045 struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
3046 struct drm_i915_gem_object *obj = vma->obj;
3050 GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
3051 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
3053 size = max(size, vma->size);
3054 if (flags & PIN_MAPPABLE)
3055 size = i915_gem_get_ggtt_size(dev_priv, size,
3056 i915_gem_object_get_tiling(obj));
3058 alignment = max(max(alignment, vma->display_alignment),
3059 i915_gem_get_ggtt_alignment(dev_priv, size,
3060 i915_gem_object_get_tiling(obj),
3061 flags & PIN_MAPPABLE));
3063 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3065 end = vma->vm->total;
3066 if (flags & PIN_MAPPABLE)
3067 end = min_t(u64, end, dev_priv->ggtt.mappable_end);
3068 if (flags & PIN_ZONE_4G)
3069 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
3071 /* If binding the object/GGTT view requires more space than the entire
3072 * aperture has, reject it early before evicting everything in a vain
3073 * attempt to find space.
3076 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
3077 size, obj->base.size,
3078 flags & PIN_MAPPABLE ? "mappable" : "total",
3083 ret = i915_gem_object_get_pages(obj);
3087 i915_gem_object_pin_pages(obj);
3089 if (flags & PIN_OFFSET_FIXED) {
3090 u64 offset = flags & PIN_OFFSET_MASK;
3091 if (offset & (alignment - 1) || offset > end - size) {
3096 vma->node.start = offset;
3097 vma->node.size = size;
3098 vma->node.color = obj->cache_level;
3099 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
3101 ret = i915_gem_evict_for_vma(vma);
3103 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
3108 u32 search_flag, alloc_flag;
3110 if (flags & PIN_HIGH) {
3111 search_flag = DRM_MM_SEARCH_BELOW;
3112 alloc_flag = DRM_MM_CREATE_TOP;
3114 search_flag = DRM_MM_SEARCH_DEFAULT;
3115 alloc_flag = DRM_MM_CREATE_DEFAULT;
3118 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3119 * so we know that we always have a minimum alignment of 4096.
3120 * The drm_mm range manager is optimised to return results
3121 * with zero alignment, so where possible use the optimal
3124 if (alignment <= 4096)
3125 alignment = 0;
3128 ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
3136 ret = i915_gem_evict_something(vma->vm, size, alignment,
3146 GEM_BUG_ON(vma->node.start < start);
3147 GEM_BUG_ON(vma->node.start + vma->node.size > end);
3149 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
3151 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3152 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3158 i915_gem_object_unpin_pages(obj);
3163 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3166 /* If we don't have a page list set up, then we're not pinned
3167 * to GPU, and we can ignore the cache flush because it'll happen
3168 * again at bind time.
3170 if (obj->pages == NULL)
3174 * Stolen memory is always coherent with the GPU as it is explicitly
3175 * marked as wc by the system, or the system is cache-coherent.
3177 if (obj->stolen || obj->phys_handle)
3180 /* If the GPU is snooping the contents of the CPU cache,
3181 * we do not need to manually clear the CPU cache lines. However,
3182 * the caches are only snooped when the render cache is
3183 * flushed/invalidated. As we always have to emit invalidations
3184 * and flushes when moving into and out of the RENDER domain, correct
3185 * snooping behaviour occurs naturally as the result of our domain
3188 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3189 obj->cache_dirty = true;
3193 trace_i915_gem_object_clflush(obj);
3194 drm_clflush_sg(obj->pages);
3195 obj->cache_dirty = false;
3200 /** Flushes the GTT write domain for the object if it's dirty. */
3202 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3204 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3206 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3209 /* No actual flushing is required for the GTT write domain. Writes
3210 * to it "immediately" go to main memory as far as we know, so there's
3211 * no chipset flush. It also doesn't land in render cache.
3213 * However, we do have to enforce the order so that all writes through
3214 * the GTT land before any writes to the device, such as updates to
3217 * We also have to wait a bit for the writes to land from the GTT.
3218 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
3219 * timing. This issue has only been observed when switching quickly
3220 * between GTT writes and CPU reads from inside the kernel on recent hw,
3221 * and it appears to only affect discrete GTT blocks (i.e. on LLC
3222 * system agents we cannot reproduce this behaviour).
3225 if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
3226 POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
3228 intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
3230 obj->base.write_domain = 0;
3231 trace_i915_gem_object_change_domain(obj,
3232 obj->base.read_domains,
3233 I915_GEM_DOMAIN_GTT);
3236 /** Flushes the CPU write domain for the object if it's dirty. */
3238 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3240 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3243 if (i915_gem_clflush_object(obj, obj->pin_display))
3244 i915_gem_chipset_flush(to_i915(obj->base.dev));
3246 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3248 obj->base.write_domain = 0;
3249 trace_i915_gem_object_change_domain(obj,
3250 obj->base.read_domains,
3251 I915_GEM_DOMAIN_CPU);
3254 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
3256 struct i915_vma *vma;
3258 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3259 if (!i915_vma_is_ggtt(vma))
3262 if (i915_vma_is_active(vma))
3265 if (!drm_mm_node_allocated(&vma->node))
3268 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3273 * Moves a single object to the GTT read, and possibly write domain.
3274 * @obj: object to act on
3275 * @write: ask for write access or read only
3277 * This function returns when the move is complete, including waiting on
3281 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3283 uint32_t old_write_domain, old_read_domains;
3286 ret = i915_gem_object_wait_rendering(obj, !write);
3290 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3293 /* Flush and acquire obj->pages so that we are coherent through
3294 * direct access in memory with previous cached writes through
3295 * shmemfs and that our cache domain tracking remains valid.
3296 * For example, if the obj->filp was moved to swap without us
3297 * being notified and releasing the pages, we would mistakenly
3298 * continue to assume that the obj remained out of the CPU cached
3301 ret = i915_gem_object_get_pages(obj);
3305 i915_gem_object_flush_cpu_write_domain(obj);
3307 /* Serialise direct access to this object with the barriers for
3308 * coherent writes from the GPU, by effectively invalidating the
3309 * GTT domain upon first access.
3311 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3312 mb();
3314 old_write_domain = obj->base.write_domain;
3315 old_read_domains = obj->base.read_domains;
3317 /* It should now be out of any other write domains, and we can update
3318 * the domain values for our changes.
3320 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3321 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3323 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3324 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3328 trace_i915_gem_object_change_domain(obj,
3332 /* And bump the LRU for this access */
3333 i915_gem_object_bump_inactive_ggtt(obj);
3339 * Changes the cache-level of an object across all VMA.
3340 * @obj: object to act on
3341 * @cache_level: new cache level to set for the object
3343 * After this function returns, the object will be in the new cache-level
3344 * across all GTT and the contents of the backing storage will be coherent,
3345 * with respect to the new cache-level. In order to keep the backing storage
3346 * coherent for all users, we only allow a single cache level to be set
3347 * globally on the object and prevent it from being changed whilst the
3348 * hardware is reading from the object. That is if the object is currently
3349 * on the scanout it will be set to uncached (or equivalent display
3350 * cache coherency) and all non-MOCS GPU access will also be uncached so
3351 * that all direct access to the scanout remains coherent.
3353 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3354 enum i915_cache_level cache_level)
3356 struct i915_vma *vma;
3359 if (obj->cache_level == cache_level)
3362 /* Inspect the list of currently bound VMA and unbind any that would
3363 * be invalid given the new cache-level. This is principally to
3364 * catch the issue of the CS prefetch crossing page boundaries and
3365 * reading an invalid PTE on older architectures.
3368 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3369 if (!drm_mm_node_allocated(&vma->node))
3372 if (i915_vma_is_pinned(vma)) {
3373 DRM_DEBUG("can not change the cache level of pinned objects\n");
3377 if (i915_gem_valid_gtt_space(vma, cache_level))
3380 ret = i915_vma_unbind(vma);
3384 /* As unbinding may affect other elements in the
3385 * obj->vma_list (due to side-effects from retiring
3386 * an active vma), play safe and restart the iterator.
3391 /* We can reuse the existing drm_mm nodes but need to change the
3392 * cache-level on the PTE. We could simply unbind them all and
3393 * rebind with the correct cache-level on next use. However since
3394 * we already have a valid slot, dma mapping, pages etc, we may as well
3395 * rewrite the PTE in the belief that doing so tramples upon less
3396 * state and so involves less work.
3398 if (obj->bind_count) {
3399 /* Before we change the PTE, the GPU must not be accessing it.
3400 * If we wait upon the object, we know that all the bound
3401 * VMA are no longer active.
3403 ret = i915_gem_object_wait_rendering(obj, false);
3407 if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
3408 /* Access to snoopable pages through the GTT is
3409 * incoherent and on some machines causes a hard
3410 * lockup. Relinquish the CPU mmapping to force
3411 * userspace to refault in the pages and we can
3412 * then double check if the GTT mapping is still
3413 * valid for that pointer access.
3415 i915_gem_release_mmap(obj);
3417 /* As we no longer need a fence for GTT access,
3418 * we can relinquish it now (and so prevent having
3419 * to steal a fence from someone else on the next
3420 * fence request). Note GPU activity would have
3421 * dropped the fence as all snoopable access is
3422 * supposed to be linear.
3424 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3425 ret = i915_vma_put_fence(vma);
3430 /* We either have incoherent backing store and
3431 * so no GTT access or the architecture is fully
3432 * coherent. In such cases, existing GTT mmaps
3433 * ignore the cache bit in the PTE and we can
3434 * rewrite it without confusing the GPU or having
3435 * to force userspace to fault back in its mmaps.
3439 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3440 if (!drm_mm_node_allocated(&vma->node))
3443 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3449 list_for_each_entry(vma, &obj->vma_list, obj_link)
3450 vma->node.color = cache_level;
3451 obj->cache_level = cache_level;
3454 /* Flush the dirty CPU caches to the backing storage so that the
3455 * object is now coherent at its new cache level (with respect
3456 * to the access domain).
3458 if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
3459 if (i915_gem_clflush_object(obj, true))
3460 i915_gem_chipset_flush(to_i915(obj->base.dev));
3466 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3467 struct drm_file *file)
3469 struct drm_i915_gem_caching *args = data;
3470 struct drm_i915_gem_object *obj;
3472 obj = i915_gem_object_lookup(file, args->handle);
3476 switch (obj->cache_level) {
3477 case I915_CACHE_LLC:
3478 case I915_CACHE_L3_LLC:
3479 args->caching = I915_CACHING_CACHED;
3483 args->caching = I915_CACHING_DISPLAY;
3487 args->caching = I915_CACHING_NONE;
3491 i915_gem_object_put_unlocked(obj);
3495 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3496 struct drm_file *file)
3498 struct drm_i915_private *i915 = to_i915(dev);
3499 struct drm_i915_gem_caching *args = data;
3500 struct drm_i915_gem_object *obj;
3501 enum i915_cache_level level;
3504 switch (args->caching) {
3505 case I915_CACHING_NONE:
3506 level = I915_CACHE_NONE;
3508 case I915_CACHING_CACHED:
3510 * Due to a HW issue on BXT A stepping, GPU stores via a
3511 * snooped mapping may leave stale data in a corresponding CPU
3512 * cacheline, whereas normally such cachelines would get invalidated.
3515 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
3518 level = I915_CACHE_LLC;
3520 case I915_CACHING_DISPLAY:
3521 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
3527 ret = i915_mutex_lock_interruptible(dev);
3531 obj = i915_gem_object_lookup(file, args->handle);
3537 ret = i915_gem_object_set_cache_level(obj, level);
3538 i915_gem_object_put(obj);
3540 mutex_unlock(&dev->struct_mutex);
3545 * Prepare buffer for display plane (scanout, cursors, etc).
3546 * Can be called from an uninterruptible phase (modesetting) and allows
3547 * any flushes to be pipelined (for pageflips).
3550 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3552 const struct i915_ggtt_view *view)
3554 struct i915_vma *vma;
3555 u32 old_read_domains, old_write_domain;
3558 /* Mark the pin_display early so that we account for the
3559 * display coherency whilst setting up the cache domains.
3563 /* The display engine is not coherent with the LLC cache on gen6. As
3564 * a result, we make sure that the pinning that is about to occur is
3565 * done with uncached PTEs. This is lowest common denominator for all
3568 * However for gen6+, we could do better by using the GFDT bit instead
3569 * of uncaching, which would allow us to flush all the LLC-cached data
3570 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3572 ret = i915_gem_object_set_cache_level(obj,
3573 HAS_WT(to_i915(obj->base.dev)) ?
3574 I915_CACHE_WT : I915_CACHE_NONE);
3577 goto err_unpin_display;
3580 /* As the user may map the buffer once pinned in the display plane
3581 * (e.g. libkms for the bootup splash), we have to ensure that we
3582 * always use map_and_fenceable for all scanout buffers. However,
3583 * it may simply be too big to fit into mappable, in which case
3584 * put it anyway and hope that userspace can cope (but always first
3585 * try to preserve the existing ABI).
3587 vma = ERR_PTR(-ENOSPC);
3588 if (view->type == I915_GGTT_VIEW_NORMAL)
3589 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3590 PIN_MAPPABLE | PIN_NONBLOCK);
3592 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
3594 goto err_unpin_display;
3596 vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3598 WARN_ON(obj->pin_display > i915_vma_pin_count(vma));
3600 i915_gem_object_flush_cpu_write_domain(obj);
3602 old_write_domain = obj->base.write_domain;
3603 old_read_domains = obj->base.read_domains;
3605 /* It should now be out of any other write domains, and we can update
3606 * the domain values for our changes.
3608 obj->base.write_domain = 0;
3609 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3611 trace_i915_gem_object_change_domain(obj,
3623 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
3625 if (WARN_ON(vma->obj->pin_display == 0))
3628 if (--vma->obj->pin_display == 0)
3629 vma->display_alignment = 0;
3631 /* Bump the LRU to try and avoid premature eviction whilst flipping */
3632 if (!i915_vma_is_active(vma))
3633 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3635 i915_vma_unpin(vma);
3636 WARN_ON(vma->obj->pin_display > i915_vma_pin_count(vma));
3640 * Moves a single object to the CPU read, and possibly write domain.
3641 * @obj: object to act on
3642 * @write: requesting write or read-only access
3644 * This function returns when the move is complete, including waiting on
3648 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3650 uint32_t old_write_domain, old_read_domains;
3653 ret = i915_gem_object_wait_rendering(obj, !write);
3657 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3660 i915_gem_object_flush_gtt_write_domain(obj);
3662 old_write_domain = obj->base.write_domain;
3663 old_read_domains = obj->base.read_domains;
3665 /* Flush the CPU cache if it's still invalid. */
3666 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3667 i915_gem_clflush_object(obj, false);
3669 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3672 /* It should now be out of any other write domains, and we can update
3673 * the domain values for our changes.
3675 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3677 /* If we're writing through the CPU, then the GPU read domains will
3678 * need to be invalidated at next use.
3681 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3682 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3685 trace_i915_gem_object_change_domain(obj,
3692 /* Throttle our rendering by waiting until the ring has completed our requests
3693 * emitted over 20 msec ago.
3695 * Note that if we were to use the current jiffies each time around the loop,
3696 * we wouldn't escape the function with any frames outstanding if the time to
3697 * render a frame was over 20ms.
3699 * This should get us reasonable parallelism between CPU and GPU but also
3700 * relatively low latency when blocking on a particular request to finish.
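/*
 * Worked example (hypothetical timings): with requests emitted 30 ms, 15 ms
 * and 5 ms ago, only the 30 ms-old request falls outside the 20 ms window,
 * so that is the one we block on; the two newer requests are left to run
 * ahead of us.
 */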
3703 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3705 struct drm_i915_private *dev_priv = to_i915(dev);
3706 struct drm_i915_file_private *file_priv = file->driver_priv;
3707 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3708 struct drm_i915_gem_request *request, *target = NULL;
3711 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3715 /* ABI: return -EIO if already wedged */
3716 if (i915_terminally_wedged(&dev_priv->gpu_error))
3719 spin_lock(&file_priv->mm.lock);
3720 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3721 if (time_after_eq(request->emitted_jiffies, recent_enough))
3725 * Note that the request might not have been submitted yet.
3726 * In which case emitted_jiffies will be zero.
3728 if (!request->emitted_jiffies)
3734 i915_gem_request_get(target);
3735 spin_unlock(&file_priv->mm.lock);
3740 ret = i915_wait_request(target, I915_WAIT_INTERRUPTIBLE, NULL, NULL);
3741 i915_gem_request_put(target);
3747 i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
3749 if (!drm_mm_node_allocated(&vma->node))
3752 if (vma->node.size < size)
3755 if (alignment && vma->node.start & (alignment - 1))
3758 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
3761 if (flags & PIN_OFFSET_BIAS &&
3762 vma->node.start < (flags & PIN_OFFSET_MASK))
3765 if (flags & PIN_OFFSET_FIXED &&
3766 vma->node.start != (flags & PIN_OFFSET_MASK))
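/*
 * Worked example (hypothetical values): a vma bound at offset 0x11000 is
 * misplaced for a request with alignment 0x10000, since
 * 0x11000 & (0x10000 - 1) == 0x1000, and must be unbound and rebound before
 * the pin can succeed.
 */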
3772 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
3774 struct drm_i915_gem_object *obj = vma->obj;
3775 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3776 bool mappable, fenceable;
3777 u32 fence_size, fence_alignment;
3779 fence_size = i915_gem_get_ggtt_size(dev_priv,
3781 i915_gem_object_get_tiling(obj));
3782 fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
3784 i915_gem_object_get_tiling(obj),
3787 fenceable = (vma->node.size == fence_size &&
3788 (vma->node.start & (fence_alignment - 1)) == 0);
3790 mappable = (vma->node.start + fence_size <=
3791 dev_priv->ggtt.mappable_end);
3793 if (mappable && fenceable)
3794 vma->flags |= I915_VMA_CAN_FENCE;
3796 vma->flags &= ~I915_VMA_CAN_FENCE;
3799 int __i915_vma_do_pin(struct i915_vma *vma,
3800 u64 size, u64 alignment, u64 flags)
3802 unsigned int bound = vma->flags;
3805 GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
3806 GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
3808 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
3813 if ((bound & I915_VMA_BIND_MASK) == 0) {
3814 ret = i915_vma_insert(vma, size, alignment, flags);
3819 ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
3823 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
3824 __i915_vma_set_map_and_fenceable(vma);
3826 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
3830 __i915_vma_unpin(vma);
3835 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3836 const struct i915_ggtt_view *view,
3841 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3842 struct i915_address_space *vm = &dev_priv->ggtt.base;
3843 struct i915_vma *vma;
3846 vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
3850 if (i915_vma_misplaced(vma, size, alignment, flags)) {
3851 if (flags & PIN_NONBLOCK &&
3852 (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
3853 return ERR_PTR(-ENOSPC);
3855 if (flags & PIN_MAPPABLE) {
3858 fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
3859 i915_gem_object_get_tiling(obj));
3860 /* If the required space is larger than the available
3861 * aperture, we will not be able to find a slot for the
3862 * object and unbinding the object now will be in
3863 * vain. Worse, doing so may cause us to ping-pong
3864 * the object in and out of the Global GTT and
3865 * waste a lot of cycles under the mutex.
3867 if (fence_size > dev_priv->ggtt.mappable_end)
3868 return ERR_PTR(-E2BIG);
3870 /* If NONBLOCK is set the caller is optimistically
3871 * trying to cache the full object within the mappable
3872 * aperture, and *must* have a fallback in place for
3873 * situations where we cannot bind the object. We
3874 * can be a little more lax here and use the fallback
3875 * more often to avoid costly migrations of ourselves
3876 * and other objects within the aperture.
3878 * Half-the-aperture is used as a simple heuristic.
3879 * More interesting would be to search for a free
3880 * block prior to making the commitment to unbind.
3881 * That caters for the self-harm case, and with a
3882 * little more heuristics (e.g. NOFAULT, NOEVICT)
3883 * we could try to minimise harm to others.
3885 if (flags & PIN_NONBLOCK &&
3886 fence_size > dev_priv->ggtt.mappable_end / 2)
3887 return ERR_PTR(-ENOSPC);
3890 WARN(i915_vma_is_pinned(vma),
3891 "bo is already pinned in ggtt with incorrect alignment:"
3892 " offset=%08x, req.alignment=%llx,"
3893 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
3894 i915_ggtt_offset(vma), alignment,
3895 !!(flags & PIN_MAPPABLE),
3896 i915_vma_is_map_and_fenceable(vma));
3897 ret = i915_vma_unbind(vma);
3899 return ERR_PTR(ret);
3902 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
3904 return ERR_PTR(ret);
3909 static __always_inline unsigned int __busy_read_flag(unsigned int id)
3911 /* Note that we could alias engines in the execbuf API, but
3912 * that would be very unwise as it prevents userspace from
3913 * fine control over engine selection. Ahem.
3915 * This should be something like EXEC_MAX_ENGINE instead of
3918 BUILD_BUG_ON(I915_NUM_ENGINES > 16);
3919 return 0x10000 << id;
3922 static __always_inline unsigned int __busy_write_id(unsigned int id)
3924 /* The uABI guarantees an active writer is also amongst the read
3925 * engines. This would be true if we accessed the activity tracking
3926 * under the lock, but as we perform the lookup of the object and
3927 * its activity locklessly we can not guarantee that the last_write
3928 * being active implies that we have set the same engine flag from
3929 * last_read - hence we always set both read and write busy for
3932 return id | __busy_read_flag(id);
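/*
 * Worked example of the encoding above (the engine id is illustrative): for
 * id == 2 the read flag is 0x10000 << 2 == 0x40000, and the busy-write value
 * is 0x40000 | 2 == 0x40002, so an active writer always also appears in the
 * read-engine mask reported to userspace.
 */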
3935 static __always_inline unsigned int
3936 __busy_set_if_active(const struct i915_gem_active *active,
3937 unsigned int (*flag)(unsigned int id))
3939 struct drm_i915_gem_request *request;
3941 request = rcu_dereference(active->request);
3942 if (!request || i915_gem_request_completed(request))
3945 /* This is racy. See __i915_gem_active_get_rcu() for an in detail
3946 * discussion of how to handle the race correctly, but for reporting
3947 * the busy state we err on the side of potentially reporting the
3948 * wrong engine as being busy (but we guarantee that the result
3949 * is at least self-consistent).
3951 * As we use SLAB_DESTROY_BY_RCU, the request may be reallocated
3952 * whilst we are inspecting it, even under the RCU read lock as we are.
3953 * This means that there is a small window for the engine and/or the
3954 * seqno to have been overwritten. The seqno will always be in the
3955 * future compared to the intended, and so we know that if that
3956 * seqno is idle (on whatever engine) our request is idle and the
3957 * return 0 above is correct.
3959 * The issue is that if the engine is switched, it is just as likely
3960 * to report that it is busy (but since the switch happened, we know
3961 * the request should be idle). So there is a small chance that a busy
3962 * result is actually the wrong engine.
3964 * So why don't we care?
3966 * For starters, the busy ioctl is a heuristic that is by definition
3967 * racy. Even with perfect serialisation in the driver, the hardware
3968 * state is constantly advancing - the state we report to the user
3971 * The critical information for the busy-ioctl is whether the object
3972 * is idle as userspace relies on that to detect whether its next
3973 * access will stall, or if it has missed submitting commands to
3974 * the hardware allowing the GPU to stall. We never generate a
3975 * false-positive for idleness, thus busy-ioctl is reliable at the
3976 * most fundamental level, and we maintain the guarantee that a
3977 * busy object left to itself will eventually become idle (and stay
3980 * We allow ourselves the leeway of potentially misreporting the busy
3981 * state because that is an optimisation heuristic that is constantly
3982 * in flux. Being quickly able to detect the busy/idle state is much
3983 * more important than accurate logging of exactly which engines were
3986 * For accuracy in reporting the engine, we could use
3989 * request = __i915_gem_active_get_rcu(active);
3991 * if (!i915_gem_request_completed(request))
3992 * result = flag(request->engine->exec_id);
3993 * i915_gem_request_put(request);
3996 * but that still remains susceptible to both hardware and userspace
3997 * races. So we accept making the result of that race slightly worse,
3998 * given the rarity of the race and its low impact on the result.
4000 return flag(READ_ONCE(request->engine->exec_id));
4003 static __always_inline unsigned int
4004 busy_check_reader(const struct i915_gem_active *active)
4006 return __busy_set_if_active(active, __busy_read_flag);
4009 static __always_inline unsigned int
4010 busy_check_writer(const struct i915_gem_active *active)
4012 return __busy_set_if_active(active, __busy_write_id);
4016 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4017 struct drm_file *file)
4019 struct drm_i915_gem_busy *args = data;
4020 struct drm_i915_gem_object *obj;
4021 unsigned long active;
4023 obj = i915_gem_object_lookup(file, args->handle);
4028 active = __I915_BO_ACTIVE(obj);
4032 /* Yes, the lookups are intentionally racy.
4034 * First, we cannot simply rely on __I915_BO_ACTIVE. We have
4035 * to regard the value as stale and as our ABI guarantees
4036 * forward progress, we confirm the status of each active
4037 * request with the hardware.
4039 * Even though we guard the pointer lookup by RCU, that only
4040 * guarantees that the pointer and its contents remain
4041 * dereferencable and does *not* mean that the request we
4042 * have is the same as the one being tracked by the object.
4044 * Consider that we lookup the request just as it is being
4045 * retired and freed. We take a local copy of the pointer,
4046 * but before we add its engine into the busy set, the other
4047 * thread reallocates it and assigns it to a task on another
4048 * engine with a fresh and incomplete seqno. Guarding against
4049 * that requires careful serialisation and reference counting,
4050 * i.e. using __i915_gem_active_get_request_rcu(). We don't,
4051 * instead we expect that if the result is busy, which engines
4052 * are busy is not completely reliable - we only guarantee
4053 * that the object was busy.
4057 for_each_active(active, idx)
4058 args->busy |= busy_check_reader(&obj->last_read[idx]);
4060 /* For ABI sanity, we only care that the write engine is in
4061 * the set of read engines. This should be ensured by the
4062 * ordering of setting last_read/last_write in
4063 * i915_vma_move_to_active(), and then in reverse in retire.
4064 * However, for good measure, we always report the last_write
4065 * request as a busy read as well as being a busy write.
4067 * We don't care that the set of active read/write engines
4068 * may change during construction of the result, as it is
4069 * equally liable to change before userspace can inspect
4072 args->busy |= busy_check_writer(&obj->last_write);
4077 i915_gem_object_put_unlocked(obj);
4082 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4083 struct drm_file *file_priv)
4085 return i915_gem_ring_throttle(dev, file_priv);
4089 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4090 struct drm_file *file_priv)
4092 struct drm_i915_private *dev_priv = to_i915(dev);
4093 struct drm_i915_gem_madvise *args = data;
4094 struct drm_i915_gem_object *obj;
4097 switch (args->madv) {
4098 case I915_MADV_DONTNEED:
4099 case I915_MADV_WILLNEED:
4105 ret = i915_mutex_lock_interruptible(dev);
4109 obj = i915_gem_object_lookup(file_priv, args->handle);
4116 i915_gem_object_is_tiled(obj) &&
4117 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4118 if (obj->madv == I915_MADV_WILLNEED)
4119 i915_gem_object_unpin_pages(obj);
4120 if (args->madv == I915_MADV_WILLNEED)
4121 i915_gem_object_pin_pages(obj);
4124 if (obj->madv != __I915_MADV_PURGED)
4125 obj->madv = args->madv;
4127 /* if the object is no longer attached, discard its backing storage */
4128 if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
4129 i915_gem_object_truncate(obj);
4131 args->retained = obj->madv != __I915_MADV_PURGED;
4133 i915_gem_object_put(obj);
4135 mutex_unlock(&dev->struct_mutex);
4139 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4140 const struct drm_i915_gem_object_ops *ops)
4144 INIT_LIST_HEAD(&obj->global_list);
4145 INIT_LIST_HEAD(&obj->userfault_link);
4146 for (i = 0; i < I915_NUM_ENGINES; i++)
4147 init_request_active(&obj->last_read[i],
4148 i915_gem_object_retire__read);
4149 init_request_active(&obj->last_write,
4150 i915_gem_object_retire__write);
4151 INIT_LIST_HEAD(&obj->obj_exec_link);
4152 INIT_LIST_HEAD(&obj->vma_list);
4153 INIT_LIST_HEAD(&obj->batch_pool_link);
4157 obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
4158 obj->madv = I915_MADV_WILLNEED;
4160 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4163 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4164 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
4165 .get_pages = i915_gem_object_get_pages_gtt,
4166 .put_pages = i915_gem_object_put_pages_gtt,
4169 /* Note we don't consider signbits :| */
4170 #define overflows_type(x, T) \
4171 (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
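/*
 * Example: on a 32-bit kernel, where obj->base.size is a 32-bit size_t,
 * requesting a 5 GiB object makes overflows_type(size, obj->base.size) true
 * because the bits shifted out by (x) >> 32 are non-zero, and we bail out
 * with -E2BIG below.
 */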
4173 struct drm_i915_gem_object *
4174 i915_gem_object_create(struct drm_device *dev, u64 size)
4176 struct drm_i915_gem_object *obj;
4177 struct address_space *mapping;
4181 /* There is a prevalence of the assumption that we fit the object's
4182 * page count inside a 32bit _signed_ variable. Let's document this and
4183 * catch if we ever need to fix it. In the meantime, if you do spot
4184 * such a local variable, please consider fixing!
4186 if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
4187 return ERR_PTR(-E2BIG);
4189 if (overflows_type(size, obj->base.size))
4190 return ERR_PTR(-E2BIG);
4192 obj = i915_gem_object_alloc(dev);
4194 return ERR_PTR(-ENOMEM);
4196 ret = drm_gem_object_init(dev, &obj->base, size);
4200 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4201 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4202 /* 965gm cannot relocate objects above 4GiB. */
4203 mask &= ~__GFP_HIGHMEM;
4204 mask |= __GFP_DMA32;
4207 mapping = obj->base.filp->f_mapping;
4208 mapping_set_gfp_mask(mapping, mask);
4210 i915_gem_object_init(obj, &i915_gem_object_ops);
4212 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4213 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4216 /* On some devices, we can have the GPU use the LLC (the CPU
4217 * cache) for about a 10% performance improvement
4218 * compared to uncached. Graphics requests other than
4219 * display scanout are coherent with the CPU in
4220 * accessing this cache. This means in this mode we
4221 * don't need to clflush on the CPU side, and on the
4222 * GPU side we only need to flush internal caches to
4223 * get data visible to the CPU.
4225 * However, we maintain the display planes as UC, and so
4226 * need to rebind when first used as such.
4228 obj->cache_level = I915_CACHE_LLC;
4230 obj->cache_level = I915_CACHE_NONE;
4232 trace_i915_gem_object_create(obj);
4237 i915_gem_object_free(obj);
4239 return ERR_PTR(ret);
4242 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4244 /* If we are the last user of the backing storage (be it shmemfs
4245 * pages or stolen etc), we know that the pages are going to be
4246 * immediately released. In this case, we can then skip copying
4247 * back the contents from the GPU.
4250 if (obj->madv != I915_MADV_WILLNEED)
4253 if (obj->base.filp == NULL)
4256 /* At first glance, this looks racy, but then again so would be
4257 * userspace racing mmap against close. However, the first external
4258 * reference to the filp can only be obtained through the
4259 * i915_gem_mmap_ioctl() which safeguards us against the user
4260 * acquiring such a reference whilst we are in the middle of
4261 * freeing the object.
4263 return atomic_long_read(&obj->base.filp->f_count) == 1;
4266 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4268 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4269 struct drm_device *dev = obj->base.dev;
4270 struct drm_i915_private *dev_priv = to_i915(dev);
4271 struct i915_vma *vma, *next;
4273 intel_runtime_pm_get(dev_priv);
4275 trace_i915_gem_object_destroy(obj);
4277 /* All file-owned VMA should have been released by this point through
4278 * i915_gem_close_object(), or earlier by i915_gem_context_close().
4279 * However, the object may also be bound into the global GTT (e.g.
4280 * older GPUs without per-process support, or for direct access through
4281 * the GTT either for the user or for scanout). Those VMA still need to
4284 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
4285 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
4286 GEM_BUG_ON(i915_vma_is_active(vma));
4287 vma->flags &= ~I915_VMA_PIN_MASK;
4288 i915_vma_close(vma);
4290 GEM_BUG_ON(obj->bind_count);
4292 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4293 * before progressing. */
4295 i915_gem_object_unpin_pages(obj);
4297 WARN_ON(atomic_read(&obj->frontbuffer_bits));
4299 if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4300 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4301 i915_gem_object_is_tiled(obj))
4302 i915_gem_object_unpin_pages(obj);
4304 if (WARN_ON(obj->pages_pin_count))
4305 obj->pages_pin_count = 0;
4306 if (discard_backing_storage(obj))
4307 obj->madv = I915_MADV_DONTNEED;
4308 i915_gem_object_put_pages(obj);
4312 if (obj->base.import_attach)
4313 drm_prime_gem_destroy(&obj->base, NULL);
4315 if (obj->ops->release)
4316 obj->ops->release(obj);
4318 drm_gem_object_release(&obj->base);
4319 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4322 i915_gem_object_free(obj);
4324 intel_runtime_pm_put(dev_priv);
4327 int i915_gem_suspend(struct drm_device *dev)
4329 struct drm_i915_private *dev_priv = to_i915(dev);
4332 intel_suspend_gt_powersave(dev_priv);
4334 mutex_lock(&dev->struct_mutex);
4336 /* We have to flush all the executing contexts to main memory so
4337 * that they can be saved in the hibernation image. To ensure the last
4338 * context image is coherent, we have to switch away from it. That
4339 * leaves the dev_priv->kernel_context still active when
4340 * we actually suspend, and its image in memory may not match the GPU
4341 * state. Fortunately, the kernel_context is disposable and we do
4342 * not rely on its state.
4344 ret = i915_gem_switch_to_kernel_context(dev_priv);
4348 ret = i915_gem_wait_for_idle(dev_priv,
4349 I915_WAIT_INTERRUPTIBLE |
4354 i915_gem_retire_requests(dev_priv);
4356 i915_gem_context_lost(dev_priv);
4357 mutex_unlock(&dev->struct_mutex);
4359 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4360 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4361 flush_delayed_work(&dev_priv->gt.idle_work);
4363 /* Assert that we successfully flushed all the work and
4364 * reset the GPU back to its idle, low power state.
4366 WARN_ON(dev_priv->gt.awake);
4369 * Neither the BIOS, ourselves, nor any other kernel
4370 * expects the system to be in execlists mode on startup,
4371 * so we need to reset the GPU back to legacy mode. And the only
4372 * known way to disable logical contexts is through a GPU reset.
4374 * So in order to leave the system in a known default configuration,
4375 * always reset the GPU upon unload and suspend. Afterwards we then
4376 * clean up the GEM state tracking, flushing off the requests and
4377 * leaving the system in a known idle state.
4379 * Note that it is of the utmost importance that the GPU is idle and
4380 * all stray writes are flushed *before* we dismantle the backing
4381 * storage for the pinned objects.
4383 * However, since we are uncertain that resetting the GPU on older
4384 * machines is a good idea, we don't - just in case it leaves the
4385 * machine in an unusable condition.
4387 if (HAS_HW_CONTEXTS(dev)) {
4388 int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
4389 WARN_ON(reset && reset != -ENODEV);
4395 mutex_unlock(&dev->struct_mutex);
4399 void i915_gem_resume(struct drm_device *dev)
4401 struct drm_i915_private *dev_priv = to_i915(dev);
4403 mutex_lock(&dev->struct_mutex);
4404 i915_gem_restore_gtt_mappings(dev);
4406 /* As we didn't flush the kernel context before suspend, we cannot
4407 * guarantee that the context image is complete. So let's just reset
4408 * it and start again.
4410 dev_priv->gt.resume(dev_priv);
4412 mutex_unlock(&dev->struct_mutex);
4415 void i915_gem_init_swizzling(struct drm_device *dev)
4417 struct drm_i915_private *dev_priv = to_i915(dev);
4419 if (INTEL_INFO(dev)->gen < 5 ||
4420 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4423 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4424 DISP_TILE_SURFACE_SWIZZLING);
4426 if (IS_GEN5(dev_priv))
4429 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4430 if (IS_GEN6(dev_priv))
4431 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4432 else if (IS_GEN7(dev_priv))
4433 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4434 else if (IS_GEN8(dev_priv))
4435 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4440 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
4442 I915_WRITE(RING_CTL(base), 0);
4443 I915_WRITE(RING_HEAD(base), 0);
4444 I915_WRITE(RING_TAIL(base), 0);
4445 I915_WRITE(RING_START(base), 0);
4448 static void init_unused_rings(struct drm_i915_private *dev_priv)
4450 if (IS_I830(dev_priv)) {
4451 init_unused_ring(dev_priv, PRB1_BASE);
4452 init_unused_ring(dev_priv, SRB0_BASE);
4453 init_unused_ring(dev_priv, SRB1_BASE);
4454 init_unused_ring(dev_priv, SRB2_BASE);
4455 init_unused_ring(dev_priv, SRB3_BASE);
4456 } else if (IS_GEN2(dev_priv)) {
4457 init_unused_ring(dev_priv, SRB0_BASE);
4458 init_unused_ring(dev_priv, SRB1_BASE);
4459 } else if (IS_GEN3(dev_priv)) {
4460 init_unused_ring(dev_priv, PRB1_BASE);
4461 init_unused_ring(dev_priv, PRB2_BASE);
int
i915_gem_init_hw(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int ret;

        /* Double layer security blanket, see i915_gem_init() */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

        if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
                I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

        if (IS_HASWELL(dev_priv))
                I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
                           LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

        if (HAS_PCH_NOP(dev_priv)) {
                if (IS_IVYBRIDGE(dev_priv)) {
                        u32 temp = I915_READ(GEN7_MSG_CTL);
                        temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
                        I915_WRITE(GEN7_MSG_CTL, temp);
                } else if (INTEL_INFO(dev)->gen >= 7) {
                        u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
                        temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
                        I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
                }
        }

        i915_gem_init_swizzling(dev);

        /*
         * At least 830 can leave some of the unused rings
         * "active" (ie. head != tail) after resume which
         * will prevent c3 entry. Makes sure all unused rings
         * are totally idle.
         */
        init_unused_rings(dev_priv);

        BUG_ON(!dev_priv->kernel_context);

        ret = i915_ppgtt_init_hw(dev);
        if (ret) {
                DRM_ERROR("PPGTT enable HW failed %d\n", ret);
                goto out;
        }

        /* Need to do basic initialisation of all rings first: */
        for_each_engine(engine, dev_priv, id) {
                ret = engine->init_hw(engine);
                if (ret)
                        goto out;
        }

        intel_mocs_init_l3cc_table(dev);

        /* We can't enable contexts until all firmware is loaded */
        ret = intel_guc_setup(dev);
        if (ret)
                goto out;

out:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        return ret;
}

bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
{
        if (INTEL_INFO(dev_priv)->gen < 6)
                return false;

        /* TODO: make semaphores and Execlists play nicely together */
        if (i915.enable_execlists)
                return false;

        if (value >= 0)
                return value;

#ifdef CONFIG_INTEL_IOMMU
        /* Enable semaphores on SNB when IO remapping is off */
        if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
                return false;
#endif

        return true;
}

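/*
 * Illustrative usage sketch (added in this edit, not part of the original
 * file): the module parameter is expected to be run through the helper above
 * once at driver load, so that later code can treat it as a plain boolean,
 * e.g.
 *
 *      i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
 *
 * Whether the sanitised value is stored back into i915.semaphores or into a
 * dev_priv field is an assumption here; only the call shape is illustrated.
 */
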
int i915_gem_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;

        mutex_lock(&dev->struct_mutex);

        if (!i915.enable_execlists) {
                dev_priv->gt.resume = intel_legacy_submission_resume;
                dev_priv->gt.cleanup_engine = intel_engine_cleanup;
        } else {
                dev_priv->gt.resume = intel_lr_context_resume;
                dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
        }

        /* This is just a security blanket to placate dragons.
         * On some systems, we very sporadically observe that the first TLBs
         * used by the CS may be stale, despite us poking the TLB reset. If
         * we hold the forcewake during initialisation these problems
         * just magically go away.
         */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

        i915_gem_init_userptr(dev_priv);

        ret = i915_gem_init_ggtt(dev_priv);
        if (ret)
                goto out_unlock;

        ret = i915_gem_context_init(dev);
        if (ret)
                goto out_unlock;

        ret = intel_engines_init(dev);
        if (ret)
                goto out_unlock;

        ret = i915_gem_init_hw(dev);
        if (ret == -EIO) {
                /* Allow engine initialisation to fail by marking the GPU as
                 * wedged. But we only want to do this where the GPU is angry,
                 * for all other failure, such as an allocation failure, bail.
                 */
                DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
                i915_gem_set_wedged(dev_priv);
                ret = 0;
        }

out_unlock:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

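/*
 * Added explanatory note (not in the original file): i915_gem_init() above
 * sets up the software state in a fixed order - global GTT, then contexts,
 * then engine/submission state - before touching the hardware via
 * i915_gem_init_hw(). The -EIO special case exists so that a GPU which fails
 * hardware init is marked wedged and the driver can still finish loading
 * (e.g. for display), rather than failing the whole probe.
 */
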
void
i915_gem_cleanup_engines(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        for_each_engine(engine, dev_priv, id)
                dev_priv->gt.cleanup_engine(engine);
}

void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        int i;

        if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv))
                dev_priv->num_fence_regs = 32;
        else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
                 IS_I945GM(dev_priv) || IS_G33(dev_priv))
                dev_priv->num_fence_regs = 16;
        else
                dev_priv->num_fence_regs = 8;

        if (intel_vgpu_active(dev_priv))
                dev_priv->num_fence_regs =
                                I915_READ(vgtif_reg(avail_rs.fence_num));

        /* Initialize fence registers to zero */
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
                struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

                fence->i915 = dev_priv;
                fence->id = i;
                list_add_tail(&fence->link, &dev_priv->mm.fence_list);
        }
        i915_gem_restore_fences(dev);

        i915_gem_detect_bit_6_swizzle(dev);
}

void
i915_gem_load_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        dev_priv->objects =
                kmem_cache_create("i915_gem_object",
                                  sizeof(struct drm_i915_gem_object), 0,
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);
        dev_priv->vmas =
                kmem_cache_create("i915_gem_vma",
                                  sizeof(struct i915_vma), 0,
                                  SLAB_HWCACHE_ALIGN,
                                  NULL);
        dev_priv->requests =
                kmem_cache_create("i915_gem_request",
                                  sizeof(struct drm_i915_gem_request), 0,
                                  SLAB_HWCACHE_ALIGN |
                                  SLAB_RECLAIM_ACCOUNT |
                                  SLAB_DESTROY_BY_RCU,
                                  NULL);

        INIT_LIST_HEAD(&dev_priv->context_list);
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
        INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
                          i915_gem_retire_work_handler);
        INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
                          i915_gem_idle_work_handler);
        init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

        dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

        init_waitqueue_head(&dev_priv->pending_flip_queue);

        dev_priv->mm.interruptible = true;

        atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

        spin_lock_init(&dev_priv->fb_tracking.lock);
}

void i915_gem_load_cleanup(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);

        kmem_cache_destroy(dev_priv->requests);
        kmem_cache_destroy(dev_priv->vmas);
        kmem_cache_destroy(dev_priv->objects);

        /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
        rcu_barrier();
}

int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
        intel_runtime_pm_get(dev_priv);

        mutex_lock(&dev_priv->drm.struct_mutex);
        i915_gem_shrink_all(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);

        intel_runtime_pm_put(dev_priv);

        return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
        struct drm_i915_gem_object *obj;
        struct list_head *phases[] = {
                &dev_priv->mm.unbound_list,
                &dev_priv->mm.bound_list,
                NULL
        }, **p;

        /* Called just before we write the hibernation image.
         *
         * We need to update the domain tracking to reflect that the CPU
         * will be accessing all the pages to create and restore from the
         * hibernation, and so upon restoration those pages will be in the
         * CPU domain.
         *
         * To make sure the hibernation image contains the latest state,
         * we update that state just before writing out the image.
         *
         * To try and reduce the hibernation image, we manually shrink
         * the objects as well.
         */

        mutex_lock(&dev_priv->drm.struct_mutex);
        i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);

        for (p = phases; *p; p++) {
                list_for_each_entry(obj, *p, global_list) {
                        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
                        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
                }
        }
        mutex_unlock(&dev_priv->drm.struct_mutex);

        return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_request *request;

        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
         */
        spin_lock(&file_priv->mm.lock);
        list_for_each_entry(request, &file_priv->mm.request_list, client_list)
                request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);

        if (!list_empty(&file_priv->rps.link)) {
                spin_lock(&to_i915(dev)->rps.client_lock);
                list_del(&file_priv->rps.link);
                spin_unlock(&to_i915(dev)->rps.client_lock);
        }
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv;
        int ret;

        DRM_DEBUG_DRIVER("\n");

        file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv)
                return -ENOMEM;

        file->driver_priv = file_priv;
        file_priv->dev_priv = to_i915(dev);
        file_priv->file = file;
        INIT_LIST_HEAD(&file_priv->rps.link);

        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);

        file_priv->bsd_engine = -1;

        ret = i915_gem_context_open(dev, file);
        if (ret)
                kfree(file_priv);

        return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
                       struct drm_i915_gem_object *new,
                       unsigned frontbuffer_bits)
{
        /* Control of individual bits within the mask are guarded by
         * the owning plane->mutex, i.e. we can never see concurrent
         * manipulation of individual bits. But since the bitfield as a whole
         * is updated using RMW, we need to use atomics in order to update
         * the bits.
         */
        BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
                     sizeof(atomic_t) * BITS_PER_BYTE);

        if (old) {
                WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
                atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
        }

        if (new) {
                WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
                atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
        }
}

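/*
 * Illustrative usage sketch (added here, not part of the original file):
 * a plane update that retargets scanout from one object to another would
 * hand over that plane's frontbuffer bit roughly like so:
 *
 *      i915_gem_track_fb(old_obj, new_obj,
 *                        INTEL_FRONTBUFFER_PRIMARY(pipe));
 *
 * old_obj, new_obj and pipe are placeholders; the real callers live in the
 * modeset code, not in this file.
 */
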
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
{
        struct page *page;

        /* Only default objects have per-page dirty tracking */
        if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
                return NULL;

        page = i915_gem_object_get_page(obj, n);
        set_page_dirty(page);
        return page;
}

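/*
 * Illustrative usage sketch (added here, not in the original file): a CPU
 * write path about to scribble into page @n of an object would use the
 * helper above instead of i915_gem_object_get_page(), so the page gets
 * written back before it can be swapped out, e.g.
 *
 *      vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, n));
 *      ... write through vaddr ...
 *      kunmap_atomic(vaddr);
 *
 * The kmap_atomic() pairing is only one possible caller; error handling is
 * omitted.
 */
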
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
                                 const void *data, size_t size)
{
        struct drm_i915_gem_object *obj;
        struct sg_table *sg;
        size_t bytes;
        int ret;

        obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
        if (IS_ERR(obj))
                return obj;

        ret = i915_gem_object_set_to_cpu_domain(obj, true);
        if (ret)
                goto fail;

        ret = i915_gem_object_get_pages(obj);
        if (ret)
                goto fail;

        i915_gem_object_pin_pages(obj);
        sg = obj->pages;
        bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
        obj->dirty = 1;         /* Backing store is now out of date */
        i915_gem_object_unpin_pages(obj);

        if (WARN_ON(bytes != size)) {
                DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
                ret = -EFAULT;
                goto fail;
        }

        return obj;

fail:
        i915_gem_object_put(obj);
        return ERR_PTR(ret);
}

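/*
 * Illustrative usage sketch (added here, not part of the original file):
 * callers such as firmware loading can use the helper above to wrap a
 * kernel buffer in a GEM object in one step, e.g.
 *
 *      obj = i915_gem_object_create_from_data(dev, fw->data, fw->size);
 *      if (IS_ERR(obj))
 *              return PTR_ERR(obj);
 *
 * fw here stands for a struct firmware already fetched by the caller; it is
 * only a placeholder for "some data and its length".
 */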