/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}
static int
insert_mappable_node(struct drm_i915_private *i915,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
						   size, 0, 0, 0,
						   i915->ggtt.mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}
static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       10 * HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}
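/*
 * Usage sketch (an assumption, not part of the driver): userspace reaches
 * the ioctl above via DRM_IOCTL_I915_GEM_GET_APERTURE with the args struct
 * from include/uapi/drm/i915_drm.h; "fd" is a hypothetical open i915 fd.
 *
 *	struct drm_i915_gem_get_aperture info = { 0 };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &info);
 *	// info.aper_size = total GGTT size,
 *	// info.aper_available_size = size minus currently pinned vmas
 */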
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	return 0;
}
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};
int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret = 0;

	/* The vma will only be freed if it is marked as closed, and if we wait
	 * upon rendering to the vma, we may unbind anything in the list.
	 */
	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}
/**
 * Ensures that all rendering to the object has completed and the object is
 * safe to unbind from the GTT or access from the CPU.
 * @obj: i915 gem object
 * @readonly: waiting for just read access or read-write access
 */
int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly)
{
	struct reservation_object *resv;
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!readonly) {
		active = obj->last_read;
		active_mask = i915_gem_object_get_active(obj);
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		int ret;

		ret = i915_gem_active_wait(&active[idx],
					   &obj->base.dev->struct_mutex);
		if (ret)
			return ret;
	}

	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (resv) {
		long err;

		err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
							  MAX_SCHEDULE_TIMEOUT);
		if (err < 0)
			return err;
	}

	return 0;
}
/* A nonblocking variant of the above wait. Must be called prior to
 * acquiring the mutex for the object, as the object state may change
 * during this call. A reference must be held by the caller for the object.
 */
static __must_check int
__unsafe_wait_rendering(struct drm_i915_gem_object *obj,
			struct intel_rps_client *rps,
			bool readonly)
{
	struct i915_gem_active *active;
	unsigned long active_mask;
	int idx;

	active_mask = __I915_BO_ACTIVE(obj);
	if (!active_mask)
		return 0;

	if (!readonly) {
		active = obj->last_read;
	} else {
		active_mask = 1;
		active = &obj->last_write;
	}

	for_each_active(active_mask, idx) {
		int ret;

		ret = i915_gem_active_wait_unlocked(&active[idx],
						    true, NULL, rps);
		if (ret)
			return ret;
	}

	return 0;
}
static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}
int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
			return -EBUSY;
		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = i915_gem_object_unbind(obj);
	if (ret)
		return ret;

	ret = i915_gem_object_put_pages(obj);
	if (ret)
		return ret;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
}
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int ret = 0;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}
void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}
static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put_unlocked(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}
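/*
 * Usage sketch (an assumption, not driver code): buffer allocation from
 * userspace goes through DRM_IOCTL_I915_GEM_CREATE; the returned handle
 * names the object in later pread/pwrite/mmap calls. The size is rounded
 * up to a page multiple internally by i915_gem_create().
 *
 *	struct drm_i915_gem_create create = { .size = 2 * 4096 };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *	// create.handle now names a two-page shmem-backed object
 */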
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}
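/*
 * Worked example for the two helpers above: with bit-17 swizzling each
 * 64-byte cacheline swaps places with its neighbour inside a 128-byte
 * pair, hence "gpu_offset ^ 64". Copying 256 bytes from gpu_offset 0
 * therefore steps through:
 *
 *	gpu_offset   0 -> swizzled  64
 *	gpu_offset  64 -> swizzled   0
 *	gpu_offset 128 -> swizzled 192
 *	gpu_offset 192 -> swizzled 128
 *
 * while cpu_offset advances linearly 0, 64, 128, 192, and this_length is
 * capped so no copy ever crosses a cacheline boundary.
 */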
/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
		return -EINVAL;

	ret = i915_gem_object_wait_rendering(obj, true);
	if (ret)
		return ret;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}
/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
static inline unsigned long
slow_user_access(struct io_mapping *mapping,
		 uint64_t page_base, int page_offset,
		 char __user *user_data,
		 unsigned long length, bool pwrite)
{
	void __iomem *ioaddr;
	void *vaddr;
	uint64_t unwritten;

	ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)ioaddr + page_offset;
	if (pwrite)
		unwritten = __copy_from_user(vaddr, user_data, length);
	else
		unwritten = __copy_to_user(user_data, vaddr, length);

	io_mapping_unmap(ioaddr);
	return unwritten;
}
static int
i915_gem_gtt_pread(struct drm_device *dev,
		   struct drm_i915_gem_object *obj, uint64_t size,
		   uint64_t data_offset, uint64_t data_ptr)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_mm_node node;
	char __user *user_data;
	uint64_t remain;
	uint64_t offset;
	int ret;

	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (ret) {
		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	} else {
		node.start = i915_gem_obj_ggtt_offset(obj);
		node.allocated = false;
		ret = i915_gem_object_put_fence(obj);
		if (ret)
			goto out_unpin;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

	user_data = u64_to_user_ptr(data_ptr);
	remain = size;
	offset = data_offset;

	mutex_unlock(&dev->struct_mutex);
	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_writeable(user_data, remain);
		if (ret) {
			mutex_lock(&dev->struct_mutex);
			goto out_unpin;
		}
	}

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start,
					       I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* This is a slow read/write as it tries to read from
		 * and write to user memory which may result into page
		 * faults, and so we cannot perform this under struct_mutex.
		 */
		if (slow_user_access(ggtt->mappable, page_base,
				     page_offset, user_data,
				     page_length, false)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	mutex_lock(&dev->struct_mutex);
	if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
		/* The user has modified the object whilst we tried
		 * reading from it, and we now have no idea what domain
		 * the pages should be in. As we have just been touching
		 * them directly, flush everything back to the GTT
		 * domain.
		 */
		ret = i915_gem_object_set_to_gtt_domain(obj, false);
	}

out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
				       true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_gem_object_ggtt_unpin(obj);
	}
out:
	return ret;
}
static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}
/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = __unsafe_wait_rendering(obj, to_rps_client(file), true);
	if (ret)
		goto err;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err;

	ret = i915_gem_shmem_pread(dev, obj, args, file);

	/* pread for non shmem backed objects */
	if (ret == -EFAULT || ret == -ENODEV) {
		intel_runtime_pm_get(to_i915(dev));
		ret = i915_gem_gtt_pread(dev, obj, args->size,
					 args->offset, args->data_ptr);
		intel_runtime_pm_put(to_i915(dev));
	}

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;

err:
	i915_gem_object_put_unlocked(obj);
	return ret;
}
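/*
 * Usage sketch (an assumption): reading an object back through
 * DRM_IOCTL_I915_GEM_PREAD. The shmem path above is tried first; the GTT
 * path is the fallback for objects without struct pages.
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle = handle,	// from DRM_IOCTL_I915_GEM_CREATE
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 */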
/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @i915: i915 device private data
 * @obj: i915 gem object
 * @args: pwrite arguments structure
 * @file: drm file pointer
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_device *dev = obj->base.dev;
	struct drm_mm_node node;
	uint64_t remain, offset;
	char __user *user_data;
	int ret;
	bool hit_slow_path = false;

	if (i915_gem_object_is_tiled(obj))
		return -EFAULT;

	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret) {
		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
		if (ret)
			goto out;

		ret = i915_gem_object_get_pages(obj);
		if (ret) {
			remove_mappable_node(&node);
			goto out;
		}

		i915_gem_object_pin_pages(obj);
	} else {
		node.start = i915_gem_obj_ggtt_offset(obj);
		node.allocated = false;
		ret = i915_gem_object_put_fence(obj);
		if (ret)
			goto out_unpin;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	intel_fb_obj_invalidate(obj, ORIGIN_GTT);
	obj->dirty = true;

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (fast_user_write(ggtt->mappable, page_base,
				    page_offset, user_data, page_length)) {
			hit_slow_path = true;
			mutex_unlock(&dev->struct_mutex);
			if (slow_user_access(ggtt->mappable,
					     page_base,
					     page_offset, user_data,
					     page_length, true)) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto out_flush;
			}

			mutex_lock(&dev->struct_mutex);
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_flush:
	if (hit_slow_path) {
		if (ret == 0 &&
		    (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
			/* The user has modified the object whilst we tried
			 * reading from it, and we now have no idea what domain
			 * the pages should be in. As we have just been touching
			 * them directly, flush everything back to the GTT
			 * domain.
			 */
			ret = i915_gem_object_set_to_gtt_domain(obj, false);
		}
	}

	intel_fb_obj_flush(obj, false, ORIGIN_GTT);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size,
				       true);
		i915_gem_object_unpin_pages(obj);
		remove_mappable_node(&node);
	} else {
		i915_gem_object_ggtt_unpin(obj);
	}
out:
	return ret;
}
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				needs_clflush_after = true;
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(to_i915(dev));
	else
		obj->cache_dirty = true;

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}
/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = __unsafe_wait_rendering(obj, to_rps_client(file), false);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else if (i915_gem_object_has_struct_page(obj))
			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
		else
			ret = -ENODEV;
	}

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);
	intel_runtime_pm_put(dev_priv);

	return ret;

err_rpm:
	intel_runtime_pm_put(dev_priv);
err:
	i915_gem_object_put_unlocked(obj);
	return ret;
}
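/*
 * Usage sketch (an assumption): the mirror image of pread. Which internal
 * path serviced the write (phys, GTT or shmem) is invisible to the caller;
 * fd, handle and data are hypothetical.
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(data),
 *		.data_ptr = (uintptr_t)data,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */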
static enum fb_op_origin
write_origin(struct drm_i915_gem_object *obj, unsigned domain)
{
	return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
	       ORIGIN_GTT : ORIGIN_CPU;
}
/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int ret;

	/* Only handle setting domains to types used by the CPU. */
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain. Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	ret = __unsafe_wait_rendering(obj, to_rps_client(file), !write_domain);
	if (ret)
		goto err;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err;

	if (read_domains & I915_GEM_DOMAIN_GTT)
		ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
	else
		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));

	i915_gem_object_put(obj);
	mutex_unlock(&dev->struct_mutex);
	return ret;

err:
	i915_gem_object_put_unlocked(obj);
	return ret;
}
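/*
 * Usage sketch (an assumption): before dirtying a CPU mmap of an object,
 * userspace moves it into the CPU domain so stale cachelines are flushed
 * or invalidated as required; domain flags come from i915_drm.h.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */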
/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int err = 0;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Pinned buffers may be scanout, so flush the cache */
	if (READ_ONCE(obj->pin_display)) {
		err = i915_mutex_lock_interruptible(dev);
		if (!err) {
			i915_gem_object_flush_cpu_write_domain(obj);
			mutex_unlock(&dev->struct_mutex);
		}
	}

	i915_gem_object_put_unlocked(obj);
	return err;
}
/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		i915_gem_object_put_unlocked(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem)) {
			i915_gem_object_put_unlocked(obj);
			return -EINTR;
		}
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);

		/* This may race, but that's ok, it only gets set */
		WRITE_ONCE(obj->has_wc_mmap, true);
	}
	i915_gem_object_put_unlocked(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
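/*
 * Usage sketch (an assumption): a CPU mapping via the legacy mmap ioctl;
 * I915_MMAP_WC asks for the write-combining PTEs set up above.
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.size = obj_size,
 *		.flags = I915_MMAP_WC,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
 *	void *ptr = (void *)(uintptr_t)mmap_arg.addr_ptr;
 */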
/**
 * i915_gem_fault - fault a page into the GTT
 * @vma: VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace. The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room. So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_ggtt_view view = i915_ggtt_view_normal;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
	pgoff_t page_offset;
	unsigned long pfn;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
		PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon acquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = __unsafe_wait_rendering(obj, NULL, !write);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
		ret = -EFAULT;
		goto err_unlock;
	}

	/* Use a partial view if the object is bigger than the aperture. */
	if (obj->base.size >= ggtt->mappable_end &&
	    !i915_gem_object_is_tiled(obj)) {
		static const unsigned int chunk_size = 256; /* 1 MiB */

		memset(&view, 0, sizeof(view));
		view.type = I915_GGTT_VIEW_PARTIAL;
		view.params.partial.offset = rounddown(page_offset, chunk_size);
		view.params.partial.size =
			min_t(unsigned int,
			      chunk_size,
			      (vma->vm_end - vma->vm_start)/PAGE_SIZE -
			      view.params.partial.offset);
	}

	/* Now pin it into the GTT if needed */
	ret = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (ret)
		goto err_unlock;

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_unpin;

	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	pfn = ggtt->mappable_base +
		i915_gem_obj_ggtt_offset_view(obj, &view);
	pfn >>= PAGE_SHIFT;

	if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
		/* Overriding existing pages in partial view does not cause
		 * us any trouble as TLBs are still valid because the fault
		 * is due to userspace losing part of the mapping or never
		 * having accessed it before (at this partials' range).
		 */
		unsigned long base = vma->vm_start +
				     (view.params.partial.offset << PAGE_SHIFT);
		unsigned int i;

		for (i = 0; i < view.params.partial.size; i++) {
			ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
			if (ret)
				break;
		}

		obj->fault_mappable = true;
	} else {
		if (!obj->fault_mappable) {
			unsigned long size = min_t(unsigned long,
						   vma->vm_end - vma->vm_start,
						   obj->base.size);
			int i;

			for (i = 0; i < size >> PAGE_SHIFT; i++) {
				ret = vm_insert_pfn(vma,
						    (unsigned long)vma->vm_start + i * PAGE_SIZE,
						    pfn + i);
				if (ret)
					break;
			}

			obj->fault_mappable = true;
		} else
			ret = vm_insert_pfn(vma,
					    (unsigned long)vmf->virtual_address,
					    pfn + page_offset);
	}
err_unpin:
	i915_gem_object_ggtt_unpin_view(obj, &view);
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(dev_priv);
err:
	switch (ret) {
	case -EIO:
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}
	return ret;
}
/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 */
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!obj->fault_mappable)
		return;

	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

	/* Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

	obj->fault_mappable = false;
}
void
i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		i915_gem_release_mmap(obj);
}
/**
 * i915_gem_get_ggtt_size - return required global GTT size for an object
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 *
 * Return the required global GTT size for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
			   u64 size, int tiling_mode)
{
	u64 ggtt_size;

	GEM_BUG_ON(size == 0);

	if (INTEL_GEN(dev_priv) >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (IS_GEN3(dev_priv))
		ggtt_size = 1024*1024;
	else
		ggtt_size = 512*1024;

	while (ggtt_size < size)
		ggtt_size <<= 1;

	return ggtt_size;
}
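/*
 * Worked example: a 768 KiB tiled object on gen3 starts from the 1 MiB
 * minimum and already fits, so 1 MiB is returned; on gen2 the 512 KiB
 * start doubles once to 1 MiB. On gen4+ or for untiled objects the size
 * is returned unchanged.
 */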
/**
 * i915_gem_get_ggtt_alignment - return required global GTT alignment
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 * @fenced: is fenced alignment required or not
 *
 * Return the required global GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
				int tiling_mode, bool fenced)
{
	GEM_BUG_ON(size == 0);

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
}
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int err;

	err = drm_gem_create_mmap_offset(&obj->base);
	if (!err)
		return 0;

	/* We can idle the GPU locklessly to flush stale objects, but in order
	 * to claim that space for ourselves, we need to take the big
	 * struct_mutex to free the requests+objects and allocate our slot.
	 */
	err = i915_gem_wait_for_idle(dev_priv, true);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (!err) {
		i915_gem_retire_requests(dev_priv);
		err = drm_gem_create_mmap_offset(&obj->base);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	return err;
}
static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}
int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret == 0)
		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

	i915_gem_object_put_unlocked(obj);
	return ret;
}
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
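/*
 * Usage sketch (an assumption): the two-step GTT mapping dance -- fetch
 * the fake offset, then mmap the DRM fd at that offset so faults land in
 * i915_gem_fault() above.
 *
 *	struct drm_i915_gem_mmap_gtt map = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &map);
 *	void *ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */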
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->madv = __I915_MADV_PURGED;
}
/* Try to discard unwanted pages */
static void
i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	switch (obj->madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = obj->base.filp->f_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (WARN_ON(ret)) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		i915_gem_clflush_object(obj, true);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	i915_gem_gtt_finish_object(obj);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj);

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	for_each_sgt_page(page, sgt_iter, obj->pages) {
		if (obj->dirty)
			set_page_dirty(page);

		if (obj->madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		put_page(page);
	}
	obj->dirty = 0;

	sg_free_table(obj->pages);
	kfree(obj->pages);
}
int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
	const struct drm_i915_gem_object_ops *ops = obj->ops;

	if (obj->pages == NULL)
		return 0;

	if (obj->pages_pin_count)
		return -EBUSY;

	GEM_BUG_ON(obj->bind_count);

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	list_del(&obj->global_list);

	if (obj->mapping) {
		if (is_vmalloc_addr(obj->mapping))
			vunmap(obj->mapping);
		else
			kunmap(kmap_to_page(obj->mapping));
		obj->mapping = NULL;
	}

	ops->put_pages(obj);
	obj->pages = NULL;

	i915_gem_object_invalidate(obj);

	return 0;
}
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int page_count, i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	int ret;
	gfp_t gfp;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache.
	 */
	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	page_count = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file. They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = obj->base.filp->f_mapping;
	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
	gfp |= __GFP_NORETRY | __GFP_NOWARN;
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
			i915_gem_shrink(dev_priv,
					page_count,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
			i915_gem_shrink_all(dev_priv);
			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
				goto err_pages;
			}
		}
#ifdef CONFIG_SWIOTLB
		if (swiotlb_nr_tbl()) {
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
			sg = sg_next(sg);
			continue;
		}
#endif
		if (!i || page_to_pfn(page) != last_pfn + 1) {
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
#endif
		sg_mark_end(sg);
	obj->pages = st;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret) {
		obj->pages = NULL;
		goto err_pages;
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj);

	if (i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		i915_gem_object_pin_pages(obj);

	return 0;

err_pages:
	sg_mark_end(sg);
	for_each_sgt_page(page, sgt_iter, st)
		put_page(page);
	sg_free_table(st);
	kfree(st);

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}
/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_get_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_put_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int
i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	const struct drm_i915_gem_object_ops *ops = obj->ops;
	int ret;

	if (obj->pages)
		return 0;

	if (obj->madv != I915_MADV_WILLNEED) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	BUG_ON(obj->pages_pin_count);

	ret = ops->get_pages(obj);
	if (ret)
		return ret;

	list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);

	obj->get_page.sg = obj->pages->sgl;
	obj->get_page.last = 0;

	return 0;
}
/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->pages;
	struct sgt_iter sgt_iter;
	struct page *page;
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
	unsigned long i = 0;
	void *addr;

	/* A single page can always be kmapped */
	if (n_pages == 1)
		return kmap(sg_page(sgt->sgl));

	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
		if (!pages)
			return NULL;
	}

	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

	addr = vmap(pages, n_pages, 0, PAGE_KERNEL);

	if (pages != stack_pages)
		drm_free_large(pages);

	return addr;
}
/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ERR_PTR(ret);

	i915_gem_object_pin_pages(obj);

	if (!obj->mapping) {
		obj->mapping = i915_gem_object_map(obj);
		if (!obj->mapping) {
			i915_gem_object_unpin_pages(obj);
			return ERR_PTR(-ENOMEM);
		}
	}

	return obj->mapping;
}
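/*
 * In-kernel usage sketch (an assumption about a typical caller): hold
 * struct_mutex, keep the pin for as long as the pointer is used, then
 * drop it; the cached mapping itself persists until the pages are
 * released.
 *
 *	void *vaddr = i915_gem_object_pin_map(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, obj->base.size);
 *	i915_gem_object_unpin_map(obj);
 */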
static void
i915_gem_object_retire__write(struct i915_gem_active *active,
			      struct drm_i915_gem_request *request)
{
	struct drm_i915_gem_object *obj =
		container_of(active, struct drm_i915_gem_object, last_write);

	intel_fb_obj_flush(obj, true, ORIGIN_CS);
}

static void
i915_gem_object_retire__read(struct i915_gem_active *active,
			     struct drm_i915_gem_request *request)
{
	int idx = request->engine->id;
	struct drm_i915_gem_object *obj =
		container_of(active, struct drm_i915_gem_object, last_read[idx]);

	GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));

	i915_gem_object_clear_active(obj, idx);
	if (i915_gem_object_is_active(obj))
		return;

	/* Bump our place on the bound list to keep it roughly in LRU order
	 * so that we don't steal from recently used but inactive objects
	 * (unless we are forced to ofc!)
	 */
	if (obj->bind_count)
		list_move_tail(&obj->global_list,
			       &request->i915->mm.bound_list);

	i915_gem_object_put(obj);
}
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
{
	unsigned long elapsed;

	if (ctx->hang_stats.banned)
		return true;

	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
	if (ctx->hang_stats.ban_period_seconds &&
	    elapsed <= ctx->hang_stats.ban_period_seconds) {
		DRM_DEBUG("context hanging too fast, banning!\n");
		return true;
	}

	return false;
}

static void i915_set_reset_status(struct i915_gem_context *ctx,
				  const bool guilty)
{
	struct i915_ctx_hang_stats *hs = &ctx->hang_stats;

	if (guilty) {
		hs->banned = i915_context_is_banned(ctx);
		hs->batch_active++;
		hs->guilty_ts = get_seconds();
	} else {
		hs->batch_pending++;
	}
}
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;

	/* We are called by the error capture and reset at a random
	 * point in time. In particular, note that neither is crucially
	 * ordered with an interrupt. After a hang, the GPU is dead and we
	 * assume that no more writes can happen (we waited long enough for
	 * all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 */
	list_for_each_entry(request, &engine->request_list, link) {
		if (i915_gem_request_completed(request))
			continue;

		return request;
	}

	return NULL;
}
static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request;
	bool ring_hung;

	request = i915_gem_find_active_request(engine);
	if (request == NULL)
		return;

	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;

	i915_set_reset_status(request->ctx, ring_hung);
	list_for_each_entry_continue(request, &engine->request_list, link)
		i915_set_reset_status(request->ctx, false);
}
2421 static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
2423 struct drm_i915_gem_request *request;
2424 struct intel_ring *ring;
2426 request = i915_gem_active_peek(&engine->last_request,
2427 &engine->i915->drm.struct_mutex);
2429 /* Mark all pending requests as complete so that any concurrent
2430 * (lockless) lookup doesn't try and wait upon the request as we
2434 intel_engine_init_seqno(engine, request->fence.seqno);
2437 * Clear the execlists queue up before freeing the requests, as those
2438 * are the ones that keep the context and ringbuffer backing objects
2442 if (i915.enable_execlists) {
2443 /* Ensure irq handler finishes or is cancelled. */
2444 tasklet_kill(&engine->irq_tasklet);
2446 intel_execlists_cancel_requests(engine);
2450 * We must free the requests after all the corresponding objects have
2451 * been moved off active lists. Which is the same order as the normal
2452 * retire_requests function does. This is important if object hold
2453 * implicit references on things like e.g. ppgtt address spaces through
2457 i915_gem_request_retire_upto(request);
2458 GEM_BUG_ON(intel_engine_is_active(engine));
2460 /* Having flushed all requests from all queues, we know that all
2461 * ringbuffers must now be empty. However, since we do not reclaim
2462 * all space when retiring the request (to prevent HEADs colliding
2463 * with rapid ringbuffer wraparound) the amount of available space
2464 * upon reset is less than when we start. Do one more pass over
2465 * all the ringbuffers to reset last_retired_head.
2466 */
2467 list_for_each_entry(ring, &engine->buffers, link) {
2468 ring->last_retired_head = ring->tail;
2469 intel_ring_update_space(ring);
2470 }
2472 engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
2473 }
2475 void i915_gem_reset(struct drm_device *dev)
2476 {
2477 struct drm_i915_private *dev_priv = to_i915(dev);
2478 struct intel_engine_cs *engine;
2480 /*
2481 * Before we free the objects from the requests, we need to inspect
2482 * them for finding the guilty party. As the requests only borrow
2483 * their reference to the objects, the inspection must be done first.
2484 */
2485 for_each_engine(engine, dev_priv)
2486 i915_gem_reset_engine_status(engine);
2488 for_each_engine(engine, dev_priv)
2489 i915_gem_reset_engine_cleanup(engine);
2490 mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2492 i915_gem_context_reset(dev);
2494 i915_gem_restore_fences(dev);
2498 i915_gem_retire_work_handler(struct work_struct *work)
2500 struct drm_i915_private *dev_priv =
2501 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2502 struct drm_device *dev = &dev_priv->drm;
2504 /* Come back later if the device is busy... */
2505 if (mutex_trylock(&dev->struct_mutex)) {
2506 i915_gem_retire_requests(dev_priv);
2507 mutex_unlock(&dev->struct_mutex);
2508 }
2510 /* Keep the retire handler running until we are finally idle.
2511 * We do not need to do this test under locking as in the worst-case
2512 * we queue the retire worker once too often.
2513 */
2514 if (READ_ONCE(dev_priv->gt.awake)) {
2515 i915_queue_hangcheck(dev_priv);
2516 queue_delayed_work(dev_priv->wq,
2517 &dev_priv->gt.retire_work,
2518 round_jiffies_up_relative(HZ));
2523 i915_gem_idle_work_handler(struct work_struct *work)
2525 struct drm_i915_private *dev_priv =
2526 container_of(work, typeof(*dev_priv), gt.idle_work.work);
2527 struct drm_device *dev = &dev_priv->drm;
2528 struct intel_engine_cs *engine;
2529 unsigned int stuck_engines;
2530 bool rearm_hangcheck;
2532 if (!READ_ONCE(dev_priv->gt.awake))
2533 return;
2535 if (READ_ONCE(dev_priv->gt.active_engines))
2536 return;
2538 rearm_hangcheck =
2539 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
2541 if (!mutex_trylock(&dev->struct_mutex)) {
2542 /* Currently busy, come back later */
2543 mod_delayed_work(dev_priv->wq,
2544 &dev_priv->gt.idle_work,
2545 msecs_to_jiffies(50));
2546 goto out_rearm;
2547 }
2549 if (dev_priv->gt.active_engines)
2550 goto out_unlock;
2552 for_each_engine(engine, dev_priv)
2553 i915_gem_batch_pool_fini(&engine->batch_pool);
2555 GEM_BUG_ON(!dev_priv->gt.awake);
2556 dev_priv->gt.awake = false;
2557 rearm_hangcheck = false;
2559 /* As we have disabled hangcheck, we need to unstick any waiters still
2560 * hanging around. However, as we may be racing against the interrupt
2561 * handler or the waiters themselves, we skip enabling the fake-irq.
2562 */
2563 stuck_engines = intel_kick_waiters(dev_priv);
2564 if (unlikely(stuck_engines))
2565 DRM_DEBUG_DRIVER("kicked stuck waiters (%x)...missed irq?\n",
2566 stuck_engines);
2568 if (INTEL_GEN(dev_priv) >= 6)
2569 gen6_rps_idle(dev_priv);
2570 intel_runtime_pm_put(dev_priv);
2571 out_unlock:
2572 mutex_unlock(&dev->struct_mutex);
2574 out_rearm:
2575 if (rearm_hangcheck) {
2576 GEM_BUG_ON(!dev_priv->gt.awake);
2577 i915_queue_hangcheck(dev_priv);
2581 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2583 struct drm_i915_gem_object *obj = to_intel_bo(gem);
2584 struct drm_i915_file_private *fpriv = file->driver_priv;
2585 struct i915_vma *vma, *vn;
2587 mutex_lock(&obj->base.dev->struct_mutex);
2588 list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
2589 if (vma->vm->file == fpriv)
2590 i915_vma_close(vma);
2591 mutex_unlock(&obj->base.dev->struct_mutex);
2595 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2596 * @dev: drm device pointer
2597 * @data: ioctl data blob
2598 * @file: drm file pointer
2600 * Returns 0 if successful, else an error is returned with the remaining time in
2601 * the timeout parameter.
2602 * -ETIME: object is still busy after timeout
2603 * -ERESTARTSYS: signal interrupted the wait
2604 * -ENOENT: object doesn't exist
2605 * Also possible, but rare:
2606 * -EAGAIN: GPU wedged
2607 * -ENOMEM: damn
2608 * -ENODEV: Internal IRQ fail
2609 * -E?: The add request failed
2611 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2612 * non-zero timeout parameter the wait ioctl will wait for the given number of
2613 * nanoseconds on an object becoming unbusy. Since the wait itself does so
2614 * without holding struct_mutex the object may become re-busied before this
2615 * function completes. A similar but shorter race condition exists in the
2616 * busy ioctl.
2617 */
2619 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2621 struct drm_i915_gem_wait *args = data;
2622 struct intel_rps_client *rps = to_rps_client(file);
2623 struct drm_i915_gem_object *obj;
2624 unsigned long active;
2625 int idx, ret = 0;
2627 if (args->flags != 0)
2628 return -EINVAL;
2630 obj = i915_gem_object_lookup(file, args->bo_handle);
2631 if (!obj)
2632 return -ENOENT;
2634 active = __I915_BO_ACTIVE(obj);
2635 for_each_active(active, idx) {
2636 s64 *timeout = args->timeout_ns >= 0 ? &args->timeout_ns : NULL;
2637 ret = i915_gem_active_wait_unlocked(&obj->last_read[idx], true,
2638 timeout, rps);
2639 if (ret)
2640 break;
2641 }
2643 i915_gem_object_put_unlocked(obj);
2644 return ret;
2645 }
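/*
 * Illustrative userspace sketch (not part of this file): exercising
 * DRM_IOCTL_I915_GEM_WAIT through libdrm. `fd` and `handle` are assumed
 * to be an open DRM fd and a valid GEM handle respectively.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.flags = 0,
 *		.timeout_ns = 1000 * 1000 * 1000,	// wait up to 1s
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0) {
 *		// object idle; wait.timeout_ns holds the unused remainder
 *	} else if (errno == ETIME) {
 *		// object still busy after the full timeout
 *	}
 *
 * A timeout_ns of 0 degenerates into a non-blocking busy query, matching
 * the busy ioctl as described in the comment above.
 */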
2648 __i915_gem_object_sync(struct drm_i915_gem_request *to,
2649 struct drm_i915_gem_request *from)
2650 {
2651 int ret;
2653 if (to->engine == from->engine)
2654 return 0;
2656 if (!i915.semaphores) {
2657 ret = i915_wait_request(from,
2658 from->i915->mm.interruptible,
2659 NULL,
2660 NO_WAITBOOST);
2661 if (ret)
2662 return ret;
2663 } else {
2664 int idx = intel_engine_sync_index(from->engine, to->engine);
2665 if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
2666 return 0;
2668 trace_i915_gem_ring_sync_to(to, from);
2669 ret = to->engine->semaphore.sync_to(to, from);
2670 if (ret)
2671 return ret;
2673 from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
2674 }
2676 return 0;
2677 }
2680 * i915_gem_object_sync - sync an object to a ring.
2682 * @obj: object which may be in use on another ring.
2683 * @to: request we are wishing to use
2685 * This code is meant to abstract object synchronization with the GPU.
2686 * Conceptually we serialise writes between engines inside the GPU.
2687 * We only allow one engine to write into a buffer at any time, but
2688 * multiple readers. To ensure each has a coherent view of memory, we must:
2690 * - If there is an outstanding write request to the object, the new
2691 * request must wait for it to complete (either CPU or in hw, requests
2692 * on the same ring will be naturally ordered).
2694 * - If we are a write request (pending_write_domain is set), the new
2695 * request must wait for outstanding read requests to complete.
2697 * Returns 0 if successful, else propagates up the lower layer error.
2698 */
2699 int
2700 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2701 struct drm_i915_gem_request *to)
2702 {
2703 struct i915_gem_active *active;
2704 unsigned long active_mask;
2705 int idx;
2707 lockdep_assert_held(&obj->base.dev->struct_mutex);
2709 active_mask = i915_gem_object_get_active(obj);
2710 if (!active_mask)
2711 return 0;
2713 if (obj->base.pending_write_domain) {
2714 active = obj->last_read;
2715 } else {
2716 active_mask = 1;
2717 active = &obj->last_write;
2718 }
2720 for_each_active(active_mask, idx) {
2721 struct drm_i915_gem_request *request;
2722 int ret;
2724 request = i915_gem_active_peek(&active[idx],
2725 &obj->base.dev->struct_mutex);
2726 if (!request)
2727 continue;
2729 ret = __i915_gem_object_sync(to, request);
2730 if (ret)
2731 return ret;
2732 }
2734 return 0;
2735 }
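/*
 * Worked example of the serialisation rule documented above (a sketch,
 * not driver code): a new request syncs against the object's trackers
 * depending on whether it reads or writes. Engine names are illustrative.
 *
 *	write(A) then read(B)  : B waits/semaphore-syncs on A (last_write)
 *	read(A)  then read(B)  : no inter-engine sync required
 *	read(A)  then write(B) : B syncs on every engine in last_read[]
 */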
2737 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
2739 u32 old_write_domain, old_read_domains;
2741 /* Force a pagefault for domain tracking on next user access */
2742 i915_gem_release_mmap(obj);
2744 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
2745 return;
2747 old_read_domains = obj->base.read_domains;
2748 old_write_domain = obj->base.write_domain;
2750 obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
2751 obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
2753 trace_i915_gem_object_change_domain(obj,
2754 old_read_domains,
2755 old_write_domain);
2756 }
2758 static void __i915_vma_iounmap(struct i915_vma *vma)
2760 GEM_BUG_ON(i915_vma_is_pinned(vma));
2762 if (vma->iomap == NULL)
2763 return;
2765 io_mapping_unmap(vma->iomap);
2766 vma->iomap = NULL;
2767 }
2769 int i915_vma_unbind(struct i915_vma *vma)
2771 struct drm_i915_gem_object *obj = vma->obj;
2772 unsigned long active;
2775 /* First wait upon any activity as retiring the request may
2776 * have side-effects such as unpinning or even unbinding this vma.
2777 */
2778 active = i915_vma_get_active(vma);
2779 if (active) {
2780 int idx;
2782 /* When a closed VMA is retired, it is unbound - eek.
2783 * In order to prevent it from being recursively closed,
2784 * take a pin on the vma so that the second unbind is
2785 * aborted.
2786 */
2787 __i915_vma_pin(vma);
2789 for_each_active(active, idx) {
2790 ret = i915_gem_active_retire(&vma->last_read[idx],
2791 &vma->vm->dev->struct_mutex);
2792 if (ret)
2793 break;
2794 }
2796 __i915_vma_unpin(vma);
2797 if (ret)
2798 return ret;
2800 GEM_BUG_ON(i915_vma_is_active(vma));
2801 }
2803 if (i915_vma_is_pinned(vma))
2804 return -EBUSY;
2806 if (!drm_mm_node_allocated(&vma->node))
2807 goto destroy;
2809 GEM_BUG_ON(obj->bind_count == 0);
2810 GEM_BUG_ON(!obj->pages);
2812 if (i915_vma_is_ggtt(vma) &&
2813 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
2814 i915_gem_object_finish_gtt(obj);
2816 /* release the fence reg _after_ flushing */
2817 ret = i915_gem_object_put_fence(obj);
2818 if (ret)
2819 return ret;
2821 __i915_vma_iounmap(vma);
2822 }
2824 if (likely(!vma->vm->closed)) {
2825 trace_i915_vma_unbind(vma);
2826 vma->vm->unbind_vma(vma);
2828 vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
2830 drm_mm_remove_node(&vma->node);
2831 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
2833 if (i915_vma_is_ggtt(vma)) {
2834 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
2835 obj->map_and_fenceable = false;
2836 } else if (vma->ggtt_view.pages) {
2837 sg_free_table(vma->ggtt_view.pages);
2838 kfree(vma->ggtt_view.pages);
2839 }
2840 vma->ggtt_view.pages = NULL;
2841 }
2843 /* Since the unbound list is global, only move to that list if
2844 * no more VMAs exist. */
2845 if (--obj->bind_count == 0)
2846 list_move_tail(&obj->global_list,
2847 &to_i915(obj->base.dev)->mm.unbound_list);
2849 /* And finally now the object is completely decoupled from this vma,
2850 * we can drop its hold on the backing storage and allow it to be
2851 * reaped by the shrinker.
2853 i915_gem_object_unpin_pages(obj);
2855 destroy:
2856 if (unlikely(i915_vma_is_closed(vma)))
2857 i915_vma_destroy(vma);
2859 return 0;
2860 }
2862 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
2863 bool interruptible)
2864 {
2865 struct intel_engine_cs *engine;
2866 int ret;
2868 for_each_engine(engine, dev_priv) {
2869 if (engine->last_context == NULL)
2870 continue;
2872 ret = intel_engine_idle(engine, interruptible);
2873 if (ret)
2874 return ret;
2875 }
2877 return 0;
2878 }
2880 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
2881 unsigned long cache_level)
2883 struct drm_mm_node *gtt_space = &vma->node;
2884 struct drm_mm_node *other;
2887 * On some machines we have to be careful when putting differing types
2888 * of snoopable memory together to avoid the prefetcher crossing memory
2889 * domains and dying. During vm initialisation, we decide whether or not
2890 * these constraints apply and set the drm_mm.color_adjust
2891 * appropriately.
2892 */
2893 if (vma->vm->mm.color_adjust == NULL)
2894 return true;
2896 if (!drm_mm_node_allocated(gtt_space))
2897 return true;
2899 if (list_empty(&gtt_space->node_list))
2900 return true;
2902 other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
2903 if (other->allocated && !other->hole_follows && other->color != cache_level)
2904 return false;
2906 other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
2907 if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
2908 return false;
2910 return true;
2911 }
2914 * i915_vma_insert - finds a slot for the vma in its address space
2915 * @vma: the vma
2916 * @size: requested size in bytes (can be larger than the VMA)
2917 * @alignment: required alignment
2918 * @flags: mask of PIN_* flags to use
2920 * First we try to allocate some free space that meets the requirements for
2921 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
2922 * preferably the oldest idle entry to make room for the new VMA.
2925 * 0 on success, negative error code otherwise.
2928 i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
2930 struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
2931 struct drm_i915_gem_object *obj = vma->obj;
2932 u64 start, end;
2933 u64 min_alignment;
2934 int ret;
2936 GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
2937 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
2939 size = max(size, vma->size);
2940 if (flags & PIN_MAPPABLE)
2941 size = i915_gem_get_ggtt_size(dev_priv, size,
2942 i915_gem_object_get_tiling(obj));
2944 min_alignment =
2945 i915_gem_get_ggtt_alignment(dev_priv, size,
2946 i915_gem_object_get_tiling(obj),
2947 flags & PIN_MAPPABLE);
2948 if (alignment == 0)
2949 alignment = min_alignment;
2950 if (alignment & (min_alignment - 1)) {
2951 DRM_DEBUG("Invalid object alignment requested %llu, minimum %llu\n",
2952 alignment, min_alignment);
2953 return -EINVAL;
2954 }
2956 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
2958 end = vma->vm->total;
2959 if (flags & PIN_MAPPABLE)
2960 end = min_t(u64, end, dev_priv->ggtt.mappable_end);
2961 if (flags & PIN_ZONE_4G)
2962 end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
2964 /* If binding the object/GGTT view requires more space than the entire
2965 * aperture has, reject it early before evicting everything in a vain
2966 * attempt to find space.
2967 */
2968 if (size > end) {
2969 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
2970 size, obj->base.size,
2971 flags & PIN_MAPPABLE ? "mappable" : "total",
2972 end);
2973 return -E2BIG;
2974 }
2976 ret = i915_gem_object_get_pages(obj);
2977 if (ret)
2978 return ret;
2980 i915_gem_object_pin_pages(obj);
2982 if (flags & PIN_OFFSET_FIXED) {
2983 u64 offset = flags & PIN_OFFSET_MASK;
2984 if (offset & (alignment - 1) || offset > end - size) {
2985 ret = -EINVAL;
2986 goto err_unpin;
2987 }
2989 vma->node.start = offset;
2990 vma->node.size = size;
2991 vma->node.color = obj->cache_level;
2992 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
2993 if (ret) {
2994 ret = i915_gem_evict_for_vma(vma);
2995 if (ret == 0)
2996 ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
2997 }
2998 if (ret)
2999 goto err_unpin;
3000 } else {
3001 u32 search_flag, alloc_flag;
3003 if (flags & PIN_HIGH) {
3004 search_flag = DRM_MM_SEARCH_BELOW;
3005 alloc_flag = DRM_MM_CREATE_TOP;
3006 } else {
3007 search_flag = DRM_MM_SEARCH_DEFAULT;
3008 alloc_flag = DRM_MM_CREATE_DEFAULT;
3009 }
3011 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3012 * so we know that we always have a minimum alignment of 4096.
3013 * The drm_mm range manager is optimised to return results
3014 * with zero alignment, so where possible use the optimal
3015 * fast path.
3016 */
3017 if (alignment <= 4096)
3018 alignment = 0;
3020 search_free:
3021 ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
3022 &vma->node,
3023 size, alignment,
3024 obj->cache_level,
3025 start, end,
3026 search_flag,
3027 alloc_flag);
3028 if (ret) {
3029 ret = i915_gem_evict_something(vma->vm, size, alignment,
3030 obj->cache_level,
3031 start, end,
3032 flags);
3033 if (ret == 0)
3034 goto search_free;
3036 goto err_unpin;
3037 }
3038 }
3039 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
3041 list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3042 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3043 obj->bind_count++;
3045 return 0;
3047 err_unpin:
3048 i915_gem_object_unpin_pages(obj);
3049 return ret;
3050 }
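/*
 * A quick numeric sketch of the power-of-two alignment check used in
 * i915_vma_insert() above: for min_alignment = 4096 (0x1000),
 * "alignment & (min_alignment - 1)" masks with 0xfff, so any requested
 * alignment that is not a multiple of 4096 is rejected:
 *
 *	alignment = 8192 : 0x2000 & 0xfff == 0     -> accepted
 *	alignment = 4096 : 0x1000 & 0xfff == 0     -> accepted
 *	alignment = 6144 : 0x1800 & 0xfff == 0x800 -> -EINVAL
 */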
3052 bool
3053 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3054 bool force)
3055 {
3056 /* If we don't have a page list set up, then we're not pinned
3057 * to GPU, and we can ignore the cache flush because it'll happen
3058 * again at bind time.
3060 if (obj->pages == NULL)
3061 return false;
3064 * Stolen memory is always coherent with the GPU as it is explicitly
3065 * marked as wc by the system, or the system is cache-coherent.
3067 if (obj->stolen || obj->phys_handle)
3068 return false;
3070 /* If the GPU is snooping the contents of the CPU cache,
3071 * we do not need to manually clear the CPU cache lines. However,
3072 * the caches are only snooped when the render cache is
3073 * flushed/invalidated. As we always have to emit invalidations
3074 * and flushes when moving into and out of the RENDER domain, correct
3075 * snooping behaviour occurs naturally as the result of our domain
3076 * tracking.
3077 */
3078 if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
3079 obj->cache_dirty = true;
3080 return false;
3081 }
3083 trace_i915_gem_object_clflush(obj);
3084 drm_clflush_sg(obj->pages);
3085 obj->cache_dirty = false;
3087 return true;
3088 }
3090 /** Flushes the GTT write domain for the object if it's dirty. */
3091 static void
3092 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3093 {
3094 uint32_t old_write_domain;
3096 if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3097 return;
3099 /* No actual flushing is required for the GTT write domain. Writes
3100 * to it immediately go to main memory as far as we know, so there's
3101 * no chipset flush. It also doesn't land in render cache.
3103 * However, we do have to enforce the order so that all writes through
3104 * the GTT land before any writes to the device, such as updates to
3105 * the GATT itself.
3106 */
3107 wmb();
3109 old_write_domain = obj->base.write_domain;
3110 obj->base.write_domain = 0;
3112 intel_fb_obj_flush(obj, false, ORIGIN_GTT);
3114 trace_i915_gem_object_change_domain(obj,
3115 obj->base.read_domains,
3116 old_write_domain);
3117 }
3119 /** Flushes the CPU write domain for the object if it's dirty. */
3120 static void
3121 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3122 {
3123 uint32_t old_write_domain;
3125 if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3126 return;
3128 if (i915_gem_clflush_object(obj, obj->pin_display))
3129 i915_gem_chipset_flush(to_i915(obj->base.dev));
3131 old_write_domain = obj->base.write_domain;
3132 obj->base.write_domain = 0;
3134 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3136 trace_i915_gem_object_change_domain(obj,
3137 obj->base.read_domains,
3138 old_write_domain);
3139 }
3142 * Moves a single object to the GTT read, and possibly write domain.
3143 * @obj: object to act on
3144 * @write: ask for write access or read only
3146 * This function returns when the move is complete, including waiting on
3147 * flushes to occur.
3148 */
3149 int
3150 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3152 uint32_t old_write_domain, old_read_domains;
3153 struct i915_vma *vma;
3156 ret = i915_gem_object_wait_rendering(obj, !write);
3157 if (ret)
3158 return ret;
3160 if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3161 return 0;
3163 /* Flush and acquire obj->pages so that we are coherent through
3164 * direct access in memory with previous cached writes through
3165 * shmemfs and that our cache domain tracking remains valid.
3166 * For example, if the obj->filp was moved to swap without us
3167 * being notified and releasing the pages, we would mistakenly
3168 * continue to assume that the obj remained out of the CPU cached
3169 * domain.
3170 */
3171 ret = i915_gem_object_get_pages(obj);
3172 if (ret)
3173 return ret;
3175 i915_gem_object_flush_cpu_write_domain(obj);
3177 /* Serialise direct access to this object with the barriers for
3178 * coherent writes from the GPU, by effectively invalidating the
3179 * GTT domain upon first access.
3181 if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3182 mb();
3184 old_write_domain = obj->base.write_domain;
3185 old_read_domains = obj->base.read_domains;
3187 /* It should now be out of any other write domains, and we can update
3188 * the domain values for our changes.
3190 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3191 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3192 if (write) {
3193 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3194 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3195 obj->dirty = 1;
3196 }
3198 trace_i915_gem_object_change_domain(obj,
3199 old_read_domains,
3200 old_write_domain);
3202 /* And bump the LRU for this access */
3203 vma = i915_gem_obj_to_ggtt(obj);
3204 if (vma &&
3205 drm_mm_node_allocated(&vma->node) &&
3206 !i915_vma_is_active(vma))
3207 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3209 return 0;
3210 }
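/*
 * Sketch of a typical in-kernel sequence (assumed calling context, with
 * struct_mutex held): pin an object into the mappable GGTT and move it
 * to the GTT domain before writing through the aperture. This is only an
 * illustration of how the helpers compose, not a verbatim caller:
 *
 *	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
 *	if (ret == 0) {
 *		ret = i915_gem_object_set_to_gtt_domain(obj, true);
 *		// ... write through the GTT mapping ...
 *		i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
 *	}
 */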
3213 * Changes the cache-level of an object across all VMA.
3214 * @obj: object to act on
3215 * @cache_level: new cache level to set for the object
3217 * After this function returns, the object will be in the new cache-level
3218 * across all GTT and the contents of the backing storage will be coherent,
3219 * with respect to the new cache-level. In order to keep the backing storage
3220 * coherent for all users, we only allow a single cache level to be set
3221 * globally on the object and prevent it from being changed whilst the
3222 * hardware is reading from the object. That is if the object is currently
3223 * on the scanout it will be set to uncached (or equivalent display
3224 * cache coherency) and all non-MOCS GPU access will also be uncached so
3225 * that all direct access to the scanout remains coherent.
3226 */
3227 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3228 enum i915_cache_level cache_level)
3230 struct i915_vma *vma;
3231 int ret = 0;
3233 if (obj->cache_level == cache_level)
3234 goto out;
3236 /* Inspect the list of currently bound VMA and unbind any that would
3237 * be invalid given the new cache-level. This is principally to
3238 * catch the issue of the CS prefetch crossing page boundaries and
3239 * reading an invalid PTE on older architectures.
3240 */
3241 restart:
3242 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3243 if (!drm_mm_node_allocated(&vma->node))
3244 continue;
3246 if (i915_vma_is_pinned(vma)) {
3247 DRM_DEBUG("can not change the cache level of pinned objects\n");
3248 return -EBUSY;
3249 }
3251 if (i915_gem_valid_gtt_space(vma, cache_level))
3252 continue;
3254 ret = i915_vma_unbind(vma);
3255 if (ret)
3256 return ret;
3258 /* As unbinding may affect other elements in the
3259 * obj->vma_list (due to side-effects from retiring
3260 * an active vma), play safe and restart the iterator.
3261 */
3262 goto restart;
3263 }
3265 /* We can reuse the existing drm_mm nodes but need to change the
3266 * cache-level on the PTE. We could simply unbind them all and
3267 * rebind with the correct cache-level on next use. However since
3268 * we already have a valid slot, dma mapping, pages etc, we may as well
3269 * rewrite the PTE in the belief that doing so tramples upon less
3270 * state and so involves less work.
3272 if (obj->bind_count) {
3273 /* Before we change the PTE, the GPU must not be accessing it.
3274 * If we wait upon the object, we know that all the bound
3275 * VMA are no longer active.
3277 ret = i915_gem_object_wait_rendering(obj, false);
3278 if (ret)
3279 return ret;
3281 if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
3282 /* Access to snoopable pages through the GTT is
3283 * incoherent and on some machines causes a hard
3284 * lockup. Relinquish the CPU mmapping to force
3285 * userspace to refault in the pages and we can
3286 * then double check if the GTT mapping is still
3287 * valid for that pointer access.
3289 i915_gem_release_mmap(obj);
3291 /* As we no longer need a fence for GTT access,
3292 * we can relinquish it now (and so prevent having
3293 * to steal a fence from someone else on the next
3294 * fence request). Note GPU activity would have
3295 * dropped the fence as all snoopable access is
3296 * supposed to be linear.
3298 ret = i915_gem_object_put_fence(obj);
3299 if (ret)
3300 return ret;
3301 } else {
3302 /* We either have incoherent backing store and
3303 * so no GTT access or the architecture is fully
3304 * coherent. In such cases, existing GTT mmaps
3305 * ignore the cache bit in the PTE and we can
3306 * rewrite it without confusing the GPU or having
3307 * to force userspace to fault back in its mmaps.
3308 */
3309 }
3311 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3312 if (!drm_mm_node_allocated(&vma->node))
3313 continue;
3315 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3316 if (ret)
3317 return ret;
3318 }
3319 }
3321 list_for_each_entry(vma, &obj->vma_list, obj_link)
3322 vma->node.color = cache_level;
3323 obj->cache_level = cache_level;
3325 out:
3326 /* Flush the dirty CPU caches to the backing storage so that the
3327 * object is now coherent at its new cache level (with respect
3328 * to the access domain).
3329 */
3330 if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
3331 if (i915_gem_clflush_object(obj, true))
3332 i915_gem_chipset_flush(to_i915(obj->base.dev));
3333 }
3335 return 0;
3336 }
3338 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3339 struct drm_file *file)
3341 struct drm_i915_gem_caching *args = data;
3342 struct drm_i915_gem_object *obj;
3344 obj = i915_gem_object_lookup(file, args->handle);
3345 if (!obj)
3346 return -ENOENT;
3348 switch (obj->cache_level) {
3349 case I915_CACHE_LLC:
3350 case I915_CACHE_L3_LLC:
3351 args->caching = I915_CACHING_CACHED;
3352 break;
3354 case I915_CACHE_WT:
3355 args->caching = I915_CACHING_DISPLAY;
3356 break;
3358 default:
3359 args->caching = I915_CACHING_NONE;
3360 break;
3361 }
3363 i915_gem_object_put_unlocked(obj);
3364 return 0;
3365 }
3367 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3368 struct drm_file *file)
3370 struct drm_i915_private *dev_priv = to_i915(dev);
3371 struct drm_i915_gem_caching *args = data;
3372 struct drm_i915_gem_object *obj;
3373 enum i915_cache_level level;
3376 switch (args->caching) {
3377 case I915_CACHING_NONE:
3378 level = I915_CACHE_NONE;
3379 break;
3380 case I915_CACHING_CACHED:
3382 * Due to a HW issue on BXT A stepping, GPU stores via a
3383 * snooped mapping may leave stale data in a corresponding CPU
3384 * cacheline, whereas normally such cachelines would get
3385 * invalidated.
3386 */
3387 if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
3388 return -ENODEV;
3390 level = I915_CACHE_LLC;
3391 break;
3392 case I915_CACHING_DISPLAY:
3393 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3394 break;
3395 default:
3396 return -EINVAL;
3397 }
3399 intel_runtime_pm_get(dev_priv);
3401 ret = i915_mutex_lock_interruptible(dev);
3402 if (ret)
3403 goto rpm_put;
3405 obj = i915_gem_object_lookup(file, args->handle);
3406 if (!obj) {
3407 ret = -ENOENT;
3408 goto unlock;
3409 }
3411 ret = i915_gem_object_set_cache_level(obj, level);
3413 i915_gem_object_put(obj);
3414 unlock:
3415 mutex_unlock(&dev->struct_mutex);
3416 rpm_put:
3417 intel_runtime_pm_put(dev_priv);
3419 return ret;
3420 }
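/*
 * Illustrative userspace sketch: requesting cached (LLC/snooped) mode for
 * a buffer via DRM_IOCTL_I915_GEM_SET_CACHING; `fd` and `handle` are
 * assumed valid.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg))
 *		// -ENODEV means the platform has neither LLC nor snooping
 */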
3423 * Prepare buffer for display plane (scanout, cursors, etc).
3424 * Can be called from an uninterruptible phase (modesetting) and allows
3425 * any flushes to be pipelined (for pageflips).
3427 int
3428 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3429 u32 alignment,
3430 const struct i915_ggtt_view *view)
3431 {
3432 u32 old_read_domains, old_write_domain;
3433 int ret;
3435 /* Mark the pin_display early so that we account for the
3436 * display coherency whilst setting up the cache domains.
3437 */
3438 obj->pin_display++;
3440 /* The display engine is not coherent with the LLC cache on gen6. As
3441 * a result, we make sure that the pinning that is about to occur is
3442 * done with uncached PTEs. This is lowest common denominator for all
3443 * chipsets.
3444 *
3445 * However for gen6+, we could do better by using the GFDT bit instead
3446 * of uncaching, which would allow us to flush all the LLC-cached data
3447 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3448 */
3449 ret = i915_gem_object_set_cache_level(obj,
3450 HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3451 if (ret)
3452 goto err_unpin_display;
3454 /* As the user may map the buffer once pinned in the display plane
3455 * (e.g. libkms for the bootup splash), we have to ensure that we
3456 * always use map_and_fenceable for all scanout buffers.
3457 */
3458 ret = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3459 view->type == I915_GGTT_VIEW_NORMAL ?
3460 PIN_MAPPABLE : 0);
3461 if (ret)
3462 goto err_unpin_display;
3464 i915_gem_object_flush_cpu_write_domain(obj);
3466 old_write_domain = obj->base.write_domain;
3467 old_read_domains = obj->base.read_domains;
3469 /* It should now be out of any other write domains, and we can update
3470 * the domain values for our changes.
3471 */
3472 obj->base.write_domain = 0;
3473 obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3475 trace_i915_gem_object_change_domain(obj,
3476 old_read_domains,
3477 old_write_domain);
3479 return 0;
3481 err_unpin_display:
3482 obj->pin_display--;
3483 return ret;
3484 }
3486 void
3487 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
3488 const struct i915_ggtt_view *view)
3490 if (WARN_ON(obj->pin_display == 0))
3491 return;
3493 i915_gem_object_ggtt_unpin_view(obj, view);
3495 obj->pin_display--;
3496 }
3499 * Moves a single object to the CPU read, and possibly write domain.
3500 * @obj: object to act on
3501 * @write: requesting write or read-only access
3503 * This function returns when the move is complete, including waiting on
3504 * flushes to occur.
3505 */
3506 int
3507 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3509 uint32_t old_write_domain, old_read_domains;
3510 int ret;
3512 ret = i915_gem_object_wait_rendering(obj, !write);
3513 if (ret)
3514 return ret;
3516 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
3517 return 0;
3519 i915_gem_object_flush_gtt_write_domain(obj);
3521 old_write_domain = obj->base.write_domain;
3522 old_read_domains = obj->base.read_domains;
3524 /* Flush the CPU cache if it's still invalid. */
3525 if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3526 i915_gem_clflush_object(obj, false);
3528 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3529 }
3531 /* It should now be out of any other write domains, and we can update
3532 * the domain values for our changes.
3534 BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3536 /* If we're writing through the CPU, then the GPU read domains will
3537 * need to be invalidated at next use.
3538 */
3539 if (write) {
3540 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3541 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3542 }
3544 trace_i915_gem_object_change_domain(obj,
3545 old_read_domains,
3546 old_write_domain);
3548 return 0;
3549 }
3551 /* Throttle our rendering by waiting until the ring has completed our requests
3552 * emitted over 20 msec ago.
3554 * Note that if we were to use the current jiffies each time around the loop,
3555 * we wouldn't escape the function with any frames outstanding if the time to
3556 * render a frame was over 20ms.
3558 * This should get us reasonable parallelism between CPU and GPU but also
3559 * relatively low latency when blocking on a particular request to finish.
3560 */
3561 static int
3562 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3564 struct drm_i915_private *dev_priv = to_i915(dev);
3565 struct drm_i915_file_private *file_priv = file->driver_priv;
3566 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3567 struct drm_i915_gem_request *request, *target = NULL;
3568 int ret;
3570 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
3571 if (ret)
3572 return ret;
3574 /* ABI: return -EIO if already wedged */
3575 if (i915_terminally_wedged(&dev_priv->gpu_error))
3576 return -EIO;
3578 spin_lock(&file_priv->mm.lock);
3579 list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3580 if (time_after_eq(request->emitted_jiffies, recent_enough))
3581 break;
3584 * Note that the request might not have been submitted yet.
3585 * In which case emitted_jiffies will be zero.
3586 */
3587 if (!request->emitted_jiffies)
3588 continue;
3590 target = request;
3591 }
3592 if (target)
3593 i915_gem_request_get(target);
3594 spin_unlock(&file_priv->mm.lock);
3596 if (target == NULL)
3597 return 0;
3599 ret = i915_wait_request(target, true, NULL, NULL);
3600 i915_gem_request_put(target);
3602 return ret;
3603 }
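/*
 * For reference, the 20ms window above comes from i915_drv.h, where
 * DRM_I915_THROTTLE_JIFFIES is defined as msecs_to_jiffies(20). A short
 * userspace sketch of the corresponding ioctl, which takes no argument
 * structure (`fd` assumed valid):
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_THROTTLE, NULL))
 *		// -EIO if the GPU is terminally wedged
 */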
3605 static bool
3606 i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
3607 {
3608 struct drm_i915_gem_object *obj = vma->obj;
3610 if (!drm_mm_node_allocated(&vma->node))
3611 return false;
3613 if (vma->node.size < size)
3614 return true;
3616 if (alignment && vma->node.start & (alignment - 1))
3617 return true;
3619 if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
3620 return true;
3622 if (flags & PIN_OFFSET_BIAS &&
3623 vma->node.start < (flags & PIN_OFFSET_MASK))
3624 return true;
3626 if (flags & PIN_OFFSET_FIXED &&
3627 vma->node.start != (flags & PIN_OFFSET_MASK))
3628 return true;
3630 return false;
3631 }
3633 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
3635 struct drm_i915_gem_object *obj = vma->obj;
3636 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3637 bool mappable, fenceable;
3638 u32 fence_size, fence_alignment;
3640 fence_size = i915_gem_get_ggtt_size(dev_priv,
3641 vma->size,
3642 i915_gem_object_get_tiling(obj));
3643 fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
3644 vma->size,
3645 i915_gem_object_get_tiling(obj),
3646 true);
3648 fenceable = (vma->node.size == fence_size &&
3649 (vma->node.start & (fence_alignment - 1)) == 0);
3651 mappable = (vma->node.start + fence_size <=
3652 dev_priv->ggtt.mappable_end);
3654 obj->map_and_fenceable = mappable && fenceable;
3657 int __i915_vma_do_pin(struct i915_vma *vma,
3658 u64 size, u64 alignment, u64 flags)
3660 unsigned int bound = vma->flags;
3663 GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
3664 GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
3666 if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
3667 ret = -EBUSY;
3668 goto err;
3669 }
3671 if ((bound & I915_VMA_BIND_MASK) == 0) {
3672 ret = i915_vma_insert(vma, size, alignment, flags);
3673 if (ret)
3674 goto err;
3675 }
3677 ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
3678 if (ret)
3679 goto err;
3681 if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
3682 __i915_vma_set_map_and_fenceable(vma);
3684 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
3685 return 0;
3687 err:
3688 __i915_vma_unpin(vma);
3689 return ret;
3690 }
3692 int
3693 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3694 const struct i915_ggtt_view *view,
3695 u64 size,
3696 u64 alignment,
3697 u64 flags)
3698 {
3699 struct i915_vma *vma;
3700 int ret;
3702 if (!view)
3703 view = &i915_ggtt_view_normal;
3705 vma = i915_gem_obj_lookup_or_create_ggtt_vma(obj, view);
3706 if (IS_ERR(vma))
3707 return PTR_ERR(vma);
3709 if (i915_vma_misplaced(vma, size, alignment, flags)) {
3710 if (flags & PIN_NONBLOCK &&
3711 (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
3712 return -ENOSPC;
3714 WARN(i915_vma_is_pinned(vma),
3715 "bo is already pinned in ggtt with incorrect alignment:"
3716 " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
3717 " obj->map_and_fenceable=%d\n",
3718 upper_32_bits(vma->node.start),
3719 lower_32_bits(vma->node.start),
3720 alignment,
3721 !!(flags & PIN_MAPPABLE),
3722 obj->map_and_fenceable);
3723 ret = i915_vma_unbind(vma);
3724 if (ret)
3725 return ret;
3726 }
3728 return i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
3729 }
3732 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
3733 const struct i915_ggtt_view *view)
3735 i915_vma_unpin(i915_gem_obj_to_ggtt_view(obj, view));
3738 static __always_inline unsigned __busy_read_flag(unsigned int id)
3740 /* Note that we could alias engines in the execbuf API, but
3741 * that would be very unwise as it prevents userspace from
3742 * fine control over engine selection. Ahem.
3744 * This should be something like EXEC_MAX_ENGINE instead of
3745 * I915_NUM_ENGINES.
3746 */
3747 BUILD_BUG_ON(I915_NUM_ENGINES > 16);
3748 return 0x10000 << id;
3749 }
3751 static __always_inline unsigned int __busy_write_id(unsigned int id)
3752 {
3753 return id;
3754 }
3756 static __always_inline unsigned
3757 __busy_set_if_active(const struct i915_gem_active *active,
3758 unsigned int (*flag)(unsigned int id))
3759 {
3760 /* For more discussion about the barriers and locking concerns,
3761 * see __i915_gem_active_get_rcu().
3762 */
3764 struct drm_i915_gem_request *request;
3767 request = rcu_dereference(active->request);
3768 if (!request || i915_gem_request_completed(request))
3769 return 0;
3771 id = request->engine->exec_id;
3773 /* Check that the pointer wasn't reassigned and overwritten. */
3774 if (request == rcu_access_pointer(active->request))
3775 return flag(id);
3777 return 0;
3778 }
3779 static inline unsigned
3780 busy_check_reader(const struct i915_gem_active *active)
3782 return __busy_set_if_active(active, __busy_read_flag);
3785 static inline unsigned
3786 busy_check_writer(const struct i915_gem_active *active)
3788 return __busy_set_if_active(active, __busy_write_id);
3792 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3793 struct drm_file *file)
3795 struct drm_i915_gem_busy *args = data;
3796 struct drm_i915_gem_object *obj;
3797 unsigned long active;
3799 obj = i915_gem_object_lookup(file, args->handle);
3800 if (!obj)
3801 return -ENOENT;
3803 args->busy = 0;
3804 active = __I915_BO_ACTIVE(obj);
3805 if (active) {
3806 int idx;
3808 /* Yes, the lookups are intentionally racy.
3810 * First, we cannot simply rely on __I915_BO_ACTIVE. We have
3811 * to regard the value as stale and as our ABI guarantees
3812 * forward progress, we confirm the status of each active
3813 * request with the hardware.
3815 * Even though we guard the pointer lookup by RCU, that only
3816 * guarantees that the pointer and its contents remain
3817 * dereferencable and does *not* mean that the request we
3818 * have is the same as the one being tracked by the object.
3820 * Consider that we lookup the request just as it is being
3821 * retired and freed. We take a local copy of the pointer,
3822 * but before we add its engine into the busy set, the other
3823 * thread reallocates it and assigns it to a task on another
3824 * engine with a fresh and incomplete seqno.
3826 * So after we lookup the engine's id, we double check that
3827 * the active request is the same and only then do we add it
3828 * into the busy set.
3829 */
3830 rcu_read_lock();
3832 for_each_active(active, idx)
3833 args->busy |= busy_check_reader(&obj->last_read[idx]);
3835 /* For ABI sanity, we only care that the write engine is in
3836 * the set of read engines. This is ensured by the ordering
3837 * of setting last_read/last_write in i915_vma_move_to_active,
3838 * and then in reverse in retire.
3840 * We don't care that the set of active read/write engines
3841 * may change during construction of the result, as it is
3842 * equally liable to change before userspace can inspect
3843 * the result.
3844 */
3845 args->busy |= busy_check_writer(&obj->last_write);
3847 rcu_read_unlock();
3848 }
3850 i915_gem_object_put_unlocked(obj);
3851 return 0;
3852 }
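/*
 * Illustrative userspace decode of the busy ioctl result (a sketch; the
 * field layout follows __busy_read_flag()/__busy_write_id() above): the
 * low 16 bits carry the exec_id of the last writer, the high bits carry
 * one flag per reading engine.
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0) {
 *		unsigned int writer = busy.busy & 0xffff;
 *		unsigned int readers = busy.busy >> 16;
 *		// bit (1 << exec_id) is set in readers per busy engine
 *	}
 */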
3855 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3856 struct drm_file *file_priv)
3858 return i915_gem_ring_throttle(dev, file_priv);
3862 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3863 struct drm_file *file_priv)
3865 struct drm_i915_private *dev_priv = to_i915(dev);
3866 struct drm_i915_gem_madvise *args = data;
3867 struct drm_i915_gem_object *obj;
3870 switch (args->madv) {
3871 case I915_MADV_DONTNEED:
3872 case I915_MADV_WILLNEED:
3873 break;
3874 default:
3875 return -EINVAL;
3876 }
3878 ret = i915_mutex_lock_interruptible(dev);
3879 if (ret)
3880 return ret;
3882 obj = i915_gem_object_lookup(file_priv, args->handle);
3883 if (!obj) {
3884 ret = -ENOENT;
3885 goto unlock;
3886 }
3888 if (obj->pages &&
3889 i915_gem_object_is_tiled(obj) &&
3890 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
3891 if (obj->madv == I915_MADV_WILLNEED)
3892 i915_gem_object_unpin_pages(obj);
3893 if (args->madv == I915_MADV_WILLNEED)
3894 i915_gem_object_pin_pages(obj);
3895 }
3897 if (obj->madv != __I915_MADV_PURGED)
3898 obj->madv = args->madv;
3900 /* if the object is no longer attached, discard its backing storage */
3901 if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL)
3902 i915_gem_object_truncate(obj);
3904 args->retained = obj->madv != __I915_MADV_PURGED;
3906 i915_gem_object_put(obj);
3907 unlock:
3908 mutex_unlock(&dev->struct_mutex);
3909 return ret;
3910 }
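/*
 * Illustrative userspace sketch of I915_MADV_DONTNEED/WILLNEED: mark a
 * cached buffer purgeable while unused, then reclaim it and check whether
 * its contents survived. `fd` and `handle` are assumed valid.
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 *	// ... later, before reusing the buffer ...
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		// backing storage was discarded; reupload the contents
 */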
3912 void i915_gem_object_init(struct drm_i915_gem_object *obj,
3913 const struct drm_i915_gem_object_ops *ops)
3914 {
3915 int i;
3917 INIT_LIST_HEAD(&obj->global_list);
3918 for (i = 0; i < I915_NUM_ENGINES; i++)
3919 init_request_active(&obj->last_read[i],
3920 i915_gem_object_retire__read);
3921 init_request_active(&obj->last_write,
3922 i915_gem_object_retire__write);
3923 init_request_active(&obj->last_fence, NULL);
3924 INIT_LIST_HEAD(&obj->obj_exec_link);
3925 INIT_LIST_HEAD(&obj->vma_list);
3926 INIT_LIST_HEAD(&obj->batch_pool_link);
3928 obj->ops = ops;
3930 obj->fence_reg = I915_FENCE_REG_NONE;
3931 obj->madv = I915_MADV_WILLNEED;
3933 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
3936 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3937 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
3938 .get_pages = i915_gem_object_get_pages_gtt,
3939 .put_pages = i915_gem_object_put_pages_gtt,
3942 struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
3943 u64 size)
3944 {
3945 struct drm_i915_gem_object *obj;
3946 struct address_space *mapping;
3947 gfp_t mask;
3948 int ret;
3950 obj = i915_gem_object_alloc(dev);
3951 if (!obj)
3952 return ERR_PTR(-ENOMEM);
3954 ret = drm_gem_object_init(dev, &obj->base, size);
3955 if (ret)
3956 goto fail;
3958 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
3959 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
3960 /* 965gm cannot relocate objects above 4GiB. */
3961 mask &= ~__GFP_HIGHMEM;
3962 mask |= __GFP_DMA32;
3963 }
3965 mapping = obj->base.filp->f_mapping;
3966 mapping_set_gfp_mask(mapping, mask);
3968 i915_gem_object_init(obj, &i915_gem_object_ops);
3970 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3971 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3973 if (HAS_LLC(dev)) {
3974 /* On some devices, we can have the GPU use the LLC (the CPU
3975 * cache) for about a 10% performance improvement
3976 * compared to uncached. Graphics requests other than
3977 * display scanout are coherent with the CPU in
3978 * accessing this cache. This means in this mode we
3979 * don't need to clflush on the CPU side, and on the
3980 * GPU side we only need to flush internal caches to
3981 * get data visible to the CPU.
3983 * However, we maintain the display planes as UC, and so
3984 * need to rebind when first used as such.
3985 */
3986 obj->cache_level = I915_CACHE_LLC;
3987 } else
3988 obj->cache_level = I915_CACHE_NONE;
3990 trace_i915_gem_object_create(obj);
3992 return obj;
3994 fail:
3995 i915_gem_object_free(obj);
3997 return ERR_PTR(ret);
3998 }
4000 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4002 /* If we are the last user of the backing storage (be it shmemfs
4003 * pages or stolen etc), we know that the pages are going to be
4004 * immediately released. In this case, we can then skip copying
4005 * back the contents from the GPU.
4008 if (obj->madv != I915_MADV_WILLNEED)
4009 return false;
4011 if (obj->base.filp == NULL)
4012 return true;
4014 /* At first glance, this looks racy, but then again so would be
4015 * userspace racing mmap against close. However, the first external
4016 * reference to the filp can only be obtained through the
4017 * i915_gem_mmap_ioctl() which safeguards us against the user
4018 * acquiring such a reference whilst we are in the middle of
4019 * freeing the object.
4020 */
4021 return atomic_long_read(&obj->base.filp->f_count) == 1;
4022 }
4024 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4026 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4027 struct drm_device *dev = obj->base.dev;
4028 struct drm_i915_private *dev_priv = to_i915(dev);
4029 struct i915_vma *vma, *next;
4031 intel_runtime_pm_get(dev_priv);
4033 trace_i915_gem_object_destroy(obj);
4035 /* All file-owned VMA should have been released by this point through
4036 * i915_gem_close_object(), or earlier by i915_gem_context_close().
4037 * However, the object may also be bound into the global GTT (e.g.
4038 * older GPUs without per-process support, or for direct access through
4039 * the GTT either for the user or for scanout). Those VMA still need to
4040 * be unbound now.
4041 */
4042 list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
4043 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
4044 GEM_BUG_ON(i915_vma_is_active(vma));
4045 vma->flags &= ~I915_VMA_PIN_MASK;
4046 i915_vma_close(vma);
4048 GEM_BUG_ON(obj->bind_count);
4050 /* Stolen objects don't hold a ref, but do hold pin count. Fix that up
4051 * before progressing. */
4052 if (obj->stolen)
4053 i915_gem_object_unpin_pages(obj);
4055 WARN_ON(atomic_read(&obj->frontbuffer_bits));
4057 if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
4058 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
4059 i915_gem_object_is_tiled(obj))
4060 i915_gem_object_unpin_pages(obj);
4062 if (WARN_ON(obj->pages_pin_count))
4063 obj->pages_pin_count = 0;
4064 if (discard_backing_storage(obj))
4065 obj->madv = I915_MADV_DONTNEED;
4066 i915_gem_object_put_pages(obj);
4068 BUG_ON(obj->pages);
4070 if (obj->base.import_attach)
4071 drm_prime_gem_destroy(&obj->base, NULL);
4073 if (obj->ops->release)
4074 obj->ops->release(obj);
4076 drm_gem_object_release(&obj->base);
4077 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4079 kfree(obj->bit_17);
4080 i915_gem_object_free(obj);
4082 intel_runtime_pm_put(dev_priv);
4083 }
4085 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4086 struct i915_address_space *vm)
4088 struct i915_vma *vma;
4089 list_for_each_entry(vma, &obj->vma_list, obj_link) {
4090 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL &&
4091 vma->vm == vm)
4092 return vma;
4093 }
4095 return NULL;
4096 }
4097 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4098 const struct i915_ggtt_view *view)
4100 struct i915_vma *vma;
4104 list_for_each_entry(vma, &obj->vma_list, obj_link)
4105 if (i915_vma_is_ggtt(vma) &&
4106 i915_ggtt_view_equal(&vma->ggtt_view, view))
4107 return vma;
4108 return NULL;
4109 }
4111 int i915_gem_suspend(struct drm_device *dev)
4113 struct drm_i915_private *dev_priv = to_i915(dev);
4116 intel_suspend_gt_powersave(dev_priv);
4118 mutex_lock(&dev->struct_mutex);
4120 /* We have to flush all the executing contexts to main memory so
4121 * that they can saved in the hibernation image. To ensure the last
4122 * context image is coherent, we have to switch away from it. That
4123 * leaves the dev_priv->kernel_context still active when
4124 * we actually suspend, and its image in memory may not match the GPU
4125 * state. Fortunately, the kernel_context is disposable and we do
4126 * not rely on its state.
4127 */
4128 ret = i915_gem_switch_to_kernel_context(dev_priv);
4129 if (ret)
4130 goto err;
4132 ret = i915_gem_wait_for_idle(dev_priv, true);
4133 if (ret)
4134 goto err;
4136 i915_gem_retire_requests(dev_priv);
4138 i915_gem_context_lost(dev_priv);
4139 mutex_unlock(&dev->struct_mutex);
4141 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4142 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4143 flush_delayed_work(&dev_priv->gt.idle_work);
4145 /* Assert that we successfully flushed all the work and
4146 * reset the GPU back to its idle, low power state.
4148 WARN_ON(dev_priv->gt.awake);
4150 return 0;
4152 err:
4153 mutex_unlock(&dev->struct_mutex);
4154 return ret;
4155 }
4157 void i915_gem_resume(struct drm_device *dev)
4159 struct drm_i915_private *dev_priv = to_i915(dev);
4161 mutex_lock(&dev->struct_mutex);
4162 i915_gem_restore_gtt_mappings(dev);
4164 /* As we didn't flush the kernel context before suspend, we cannot
4165 * guarantee that the context image is complete. So let's just reset
4166 * it and start again.
4168 if (i915.enable_execlists)
4169 intel_lr_context_reset(dev_priv, dev_priv->kernel_context);
4171 mutex_unlock(&dev->struct_mutex);
4174 void i915_gem_init_swizzling(struct drm_device *dev)
4176 struct drm_i915_private *dev_priv = to_i915(dev);
4178 if (INTEL_INFO(dev)->gen < 5 ||
4179 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4180 return;
4182 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4183 DISP_TILE_SURFACE_SWIZZLING);
4185 if (IS_GEN5(dev))
4186 return;
4188 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4189 if (IS_GEN6(dev))
4190 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4191 else if (IS_GEN7(dev))
4192 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4193 else if (IS_GEN8(dev))
4194 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4195 else
4196 BUG();
4197 }
4199 static void init_unused_ring(struct drm_device *dev, u32 base)
4201 struct drm_i915_private *dev_priv = to_i915(dev);
4203 I915_WRITE(RING_CTL(base), 0);
4204 I915_WRITE(RING_HEAD(base), 0);
4205 I915_WRITE(RING_TAIL(base), 0);
4206 I915_WRITE(RING_START(base), 0);
4207 }
4209 static void init_unused_rings(struct drm_device *dev)
4210 {
4211 if (IS_I830(dev)) {
4212 init_unused_ring(dev, PRB1_BASE);
4213 init_unused_ring(dev, SRB0_BASE);
4214 init_unused_ring(dev, SRB1_BASE);
4215 init_unused_ring(dev, SRB2_BASE);
4216 init_unused_ring(dev, SRB3_BASE);
4217 } else if (IS_GEN2(dev)) {
4218 init_unused_ring(dev, SRB0_BASE);
4219 init_unused_ring(dev, SRB1_BASE);
4220 } else if (IS_GEN3(dev)) {
4221 init_unused_ring(dev, PRB1_BASE);
4222 init_unused_ring(dev, PRB2_BASE);
4223 }
4224 }
4227 i915_gem_init_hw(struct drm_device *dev)
4229 struct drm_i915_private *dev_priv = to_i915(dev);
4230 struct intel_engine_cs *engine;
4233 /* Double layer security blanket, see i915_gem_init() */
4234 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4236 if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
4237 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4239 if (IS_HASWELL(dev))
4240 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4241 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4243 if (HAS_PCH_NOP(dev)) {
4244 if (IS_IVYBRIDGE(dev)) {
4245 u32 temp = I915_READ(GEN7_MSG_CTL);
4246 temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4247 I915_WRITE(GEN7_MSG_CTL, temp);
4248 } else if (INTEL_INFO(dev)->gen >= 7) {
4249 u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4250 temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4251 I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4255 i915_gem_init_swizzling(dev);
4258 * At least 830 can leave some of the unused rings
4259 * "active" (ie. head != tail) after resume which
4260 * will prevent c3 entry. Makes sure all unused rings
4261 * are totally idle.
4262 */
4263 init_unused_rings(dev);
4265 BUG_ON(!dev_priv->kernel_context);
4267 ret = i915_ppgtt_init_hw(dev);
4268 if (ret) {
4269 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
4270 goto out;
4271 }
4273 /* Need to do basic initialisation of all rings first: */
4274 for_each_engine(engine, dev_priv) {
4275 ret = engine->init_hw(engine);
4276 if (ret)
4277 goto out;
4278 }
4280 intel_mocs_init_l3cc_table(dev);
4282 /* We can't enable contexts until all firmware is loaded */
4283 ret = intel_guc_setup(dev);
4284 if (ret)
4285 goto out;
4287 out:
4288 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4289 return ret;
4290 }
4292 bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
4294 if (INTEL_INFO(dev_priv)->gen < 6)
4295 return false;
4297 /* TODO: make semaphores and Execlists play nicely together */
4298 if (i915.enable_execlists)
4299 return false;
4301 if (value >= 0)
4302 return value;
4304 #ifdef CONFIG_INTEL_IOMMU
4305 /* Enable semaphores on SNB when IO remapping is off */
4306 if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
4307 return false;
4308 #endif
4310 return true;
4311 }
4313 int i915_gem_init(struct drm_device *dev)
4315 struct drm_i915_private *dev_priv = to_i915(dev);
4318 mutex_lock(&dev->struct_mutex);
4320 if (!i915.enable_execlists) {
4321 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4322 } else {
4323 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4324 }
4326 /* This is just a security blanket to placate dragons.
4327 * On some systems, we very sporadically observe that the first TLBs
4328 * used by the CS may be stale, despite us poking the TLB reset. If
4329 * we hold the forcewake during initialisation these problems
4330 * just magically go away.
4331 */
4332 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4334 i915_gem_init_userptr(dev_priv);
4336 ret = i915_gem_init_ggtt(dev_priv);
4337 if (ret)
4338 goto out_unlock;
4340 ret = i915_gem_context_init(dev);
4341 if (ret)
4342 goto out_unlock;
4344 ret = intel_engines_init(dev);
4345 if (ret)
4346 goto out_unlock;
4348 ret = i915_gem_init_hw(dev);
4349 if (ret == -EIO) {
4350 /* Allow engine initialisation to fail by marking the GPU as
4351 * wedged. But we only want to do this where the GPU is angry,
4352 * for all other failure, such as an allocation failure, bail.
4353 */
4354 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4355 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4356 ret = 0;
4357 }
4359 out_unlock:
4360 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4361 mutex_unlock(&dev->struct_mutex);
4363 return ret;
4364 }
4367 i915_gem_cleanup_engines(struct drm_device *dev)
4369 struct drm_i915_private *dev_priv = to_i915(dev);
4370 struct intel_engine_cs *engine;
4372 for_each_engine(engine, dev_priv)
4373 dev_priv->gt.cleanup_engine(engine);
4377 init_engine_lists(struct intel_engine_cs *engine)
4379 INIT_LIST_HEAD(&engine->request_list);
4383 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
4385 struct drm_device *dev = &dev_priv->drm;
4387 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
4388 !IS_CHERRYVIEW(dev_priv))
4389 dev_priv->num_fence_regs = 32;
4390 else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
4391 IS_I945GM(dev_priv) || IS_G33(dev_priv))
4392 dev_priv->num_fence_regs = 16;
4393 else
4394 dev_priv->num_fence_regs = 8;
4396 if (intel_vgpu_active(dev_priv))
4397 dev_priv->num_fence_regs =
4398 I915_READ(vgtif_reg(avail_rs.fence_num));
4400 /* Initialize fence registers to zero */
4401 i915_gem_restore_fences(dev);
4403 i915_gem_detect_bit_6_swizzle(dev);
4407 i915_gem_load_init(struct drm_device *dev)
4409 struct drm_i915_private *dev_priv = to_i915(dev);
4410 int i;
4412 dev_priv->objects =
4413 kmem_cache_create("i915_gem_object",
4414 sizeof(struct drm_i915_gem_object), 0,
4415 SLAB_HWCACHE_ALIGN,
4416 NULL);
4417 dev_priv->vmas =
4418 kmem_cache_create("i915_gem_vma",
4419 sizeof(struct i915_vma), 0,
4420 SLAB_HWCACHE_ALIGN,
4421 NULL);
4422 dev_priv->requests =
4423 kmem_cache_create("i915_gem_request",
4424 sizeof(struct drm_i915_gem_request), 0,
4425 SLAB_HWCACHE_ALIGN |
4426 SLAB_RECLAIM_ACCOUNT |
4427 SLAB_DESTROY_BY_RCU,
4428 NULL);
4430 INIT_LIST_HEAD(&dev_priv->context_list);
4431 INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
4432 INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4433 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4434 for (i = 0; i < I915_NUM_ENGINES; i++)
4435 init_engine_lists(&dev_priv->engine[i]);
4436 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
4437 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4438 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
4439 i915_gem_retire_work_handler);
4440 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
4441 i915_gem_idle_work_handler);
4442 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4443 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4445 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
4449 init_waitqueue_head(&dev_priv->pending_flip_queue);
4451 dev_priv->mm.interruptible = true;
4453 spin_lock_init(&dev_priv->fb_tracking.lock);
4456 void i915_gem_load_cleanup(struct drm_device *dev)
4458 struct drm_i915_private *dev_priv = to_i915(dev);
4460 kmem_cache_destroy(dev_priv->requests);
4461 kmem_cache_destroy(dev_priv->vmas);
4462 kmem_cache_destroy(dev_priv->objects);
4464 /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
4465 rcu_barrier();
4466 }
4468 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
4470 struct drm_i915_gem_object *obj;
4472 /* Called just before we write the hibernation image.
4474 * We need to update the domain tracking to reflect that the CPU
4475 * will be accessing all the pages to create and restore from the
4476 * hibernation, and so upon restoration those pages will be in the
4477 * CPU domain.
4478 *
4479 * To make sure the hibernation image contains the latest state,
4480 * we update that state just before writing out the image.
4481 */
4483 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
4484 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4485 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4486 }
4488 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
4489 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4490 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4491 }
4493 return 0;
4494 }
4496 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4498 struct drm_i915_file_private *file_priv = file->driver_priv;
4499 struct drm_i915_gem_request *request;
4501 /* Clean up our request list when the client is going away, so that
4502 * later retire_requests won't dereference our soon-to-be-gone
4503 * file_priv.
4504 */
4505 spin_lock(&file_priv->mm.lock);
4506 list_for_each_entry(request, &file_priv->mm.request_list, client_list)
4507 request->file_priv = NULL;
4508 spin_unlock(&file_priv->mm.lock);
4510 if (!list_empty(&file_priv->rps.link)) {
4511 spin_lock(&to_i915(dev)->rps.client_lock);
4512 list_del(&file_priv->rps.link);
4513 spin_unlock(&to_i915(dev)->rps.client_lock);
4514 }
4515 }
4517 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
4519 struct drm_i915_file_private *file_priv;
4522 DRM_DEBUG_DRIVER("\n");
4524 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
4525 if (!file_priv)
4526 return -ENOMEM;
4528 file->driver_priv = file_priv;
4529 file_priv->dev_priv = to_i915(dev);
4530 file_priv->file = file;
4531 INIT_LIST_HEAD(&file_priv->rps.link);
4533 spin_lock_init(&file_priv->mm.lock);
4534 INIT_LIST_HEAD(&file_priv->mm.request_list);
4536 file_priv->bsd_engine = -1;
4538 ret = i915_gem_context_open(dev, file);
4539 if (ret)
4540 kfree(file_priv);
4542 return ret;
4543 }
4546 * i915_gem_track_fb - update frontbuffer tracking
4547 * @old: current GEM buffer for the frontbuffer slots
4548 * @new: new GEM buffer for the frontbuffer slots
4549 * @frontbuffer_bits: bitmask of frontbuffer slots
4551 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
4552 * from @old and setting them in @new. Both @old and @new can be NULL.
4554 void i915_gem_track_fb(struct drm_i915_gem_object *old,
4555 struct drm_i915_gem_object *new,
4556 unsigned frontbuffer_bits)
4558 /* Control of individual bits within the mask are guarded by
4559 * the owning plane->mutex, i.e. we can never see concurrent
4560 * manipulation of individual bits. But since the bitfield as a whole
4561 * is updated using RMW, we need to use atomics in order to update
4562 * them.
4563 */
4564 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
4565 sizeof(atomic_t) * BITS_PER_BYTE);
4567 if (old) {
4568 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
4569 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
4570 }
4572 if (new) {
4573 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
4574 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
4575 }
4576 }
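/*
 * Sketch of the intended calling pattern (from the modesetting side,
 * assumed context): when a plane flips from one buffer to another, the
 * frontbuffer bits for that plane migrate atomically from old to new:
 *
 *	i915_gem_track_fb(old_obj, new_obj,
 *			  INTEL_FRONTBUFFER_PRIMARY(pipe));
 *
 * Either object may be NULL, e.g. when enabling or disabling the plane.
 */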
4578 /* All the new VM stuff */
4579 u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
4580 struct i915_address_space *vm)
4582 struct drm_i915_private *dev_priv = to_i915(o->base.dev);
4583 struct i915_vma *vma;
4585 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
4587 list_for_each_entry(vma, &o->vma_list, obj_link) {
4588 if (i915_vma_is_ggtt(vma) &&
4589 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
4590 continue;
4591 if (vma->vm == vm)
4592 return vma->node.start;
4593 }
4595 WARN(1, "%s vma for this object not found.\n",
4596 i915_is_ggtt(vm) ? "global" : "ppgtt");
4597 return -1;
4598 }
4600 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
4601 const struct i915_ggtt_view *view)
4603 struct i915_vma *vma;
4605 list_for_each_entry(vma, &o->vma_list, obj_link)
4606 if (i915_vma_is_ggtt(vma) &&
4607 i915_ggtt_view_equal(&vma->ggtt_view, view))
4608 return vma->node.start;
4610 WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
4614 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
4615 struct i915_address_space *vm)
4617 struct i915_vma *vma;
4619 list_for_each_entry(vma, &o->vma_list, obj_link) {
4620 if (i915_vma_is_ggtt(vma) &&
4621 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
4622 continue;
4623 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
4624 return true;
4625 }
4627 return false;
4628 }
4630 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
4631 const struct i915_ggtt_view *view)
4633 struct i915_vma *vma;
4635 list_for_each_entry(vma, &o->vma_list, obj_link)
4636 if (i915_vma_is_ggtt(vma) &&
4637 i915_ggtt_view_equal(&vma->ggtt_view, view) &&
4638 drm_mm_node_allocated(&vma->node))
4639 return true;
4641 return false;
4642 }
4644 unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
4646 struct i915_vma *vma;
4648 GEM_BUG_ON(list_empty(&o->vma_list));
4650 list_for_each_entry(vma, &o->vma_list, obj_link) {
4651 if (i915_vma_is_ggtt(vma) &&
4652 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
4653 return vma->node.size;
4654 }
4656 return 0;
4657 }
4659 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
4661 struct i915_vma *vma;
4662 list_for_each_entry(vma, &obj->vma_list, obj_link)
4663 if (i915_vma_is_pinned(vma))
4664 return true;
4666 return false;
4667 }
4669 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
4670 struct page *
4671 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
4672 {
4673 struct page *page;
4675 /* Only default objects have per-page dirty tracking */
4676 if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
4677 return NULL;
4679 page = i915_gem_object_get_page(obj, n);
4680 set_page_dirty(page);
4682 return page;
4683 }
4684 /* Allocate a new GEM object and fill it with the supplied data */
4685 struct drm_i915_gem_object *
4686 i915_gem_object_create_from_data(struct drm_device *dev,
4687 const void *data, size_t size)
4689 struct drm_i915_gem_object *obj;
4690 struct sg_table *sg;
4691 size_t bytes;
4692 int ret;
4694 obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
4695 if (IS_ERR(obj))
4696 return obj;
4698 ret = i915_gem_object_set_to_cpu_domain(obj, true);
4699 if (ret)
4700 goto fail;
4702 ret = i915_gem_object_get_pages(obj);
4703 if (ret)
4704 goto fail;
4706 i915_gem_object_pin_pages(obj);
4707 sg = obj->pages;
4708 bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
4709 obj->dirty = 1; /* Backing store is now out of date */
4710 i915_gem_object_unpin_pages(obj);
4712 if (WARN_ON(bytes != size)) {
4713 DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
4721 i915_gem_object_put(obj);
4722 return ERR_PTR(ret);
4723 }
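/*
 * Illustrative kernel-side sketch (assumed caller, e.g. a firmware
 * loader): wrap a data blob in a GEM object. `blob` and `blob_size` are
 * hypothetical names, not symbols defined in this file.
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create_from_data(dev, blob, blob_size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	// ... pin and use the object, then drop the reference with
 *	// i915_gem_object_put(obj) under struct_mutex ...
 */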