 * Copyright © 2008-2015 Intel Corporation
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * Eric Anholt <eric@anholt.net>
#include <drm/drm_vma_manager.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>
#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_workarounds.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
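/*
 * insert_mappable_node()/remove_mappable_node() reserve and release a
 * temporary slot in the mappable GGTT aperture; the pread/pwrite slow
 * paths below fall back to such a single-page slot when the object
 * cannot be pinned into the aperture as a whole.
 */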
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
err = mutex_lock_interruptible(&ggtt->vm.mutex);
memset(node, 0, sizeof(*node));
err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
size, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
mutex_unlock(&ggtt->vm.mutex);
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
mutex_lock(&ggtt->vm.mutex);
drm_mm_remove_node(node);
mutex_unlock(&ggtt->vm.mutex);
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
struct drm_i915_gem_get_aperture *args = data;
if (mutex_lock_interruptible(&ggtt->vm.mutex))
pinned = ggtt->vm.reserved;
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
if (i915_vma_is_pinned(vma))
pinned += vma->node.size;
mutex_unlock(&ggtt->vm.mutex);
args->aper_size = ggtt->vm.total;
args->aper_available_size = args->aper_size - pinned;
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
LIST_HEAD(still_in_list);
intel_wakeref_t wakeref;
struct i915_vma *vma;
if (list_empty(&obj->vma.list))
 * As some machines use ACPI to handle runtime-resume callbacks, and
 * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
 * as they are required by the shrinker. Ergo, we wake the device up
 * first just in case.
wakeref = intel_runtime_pm_get(rpm);
spin_lock(&obj->vma.lock);
while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
struct i915_address_space *vm = vma->vm;
list_move_tail(&vma->obj_link, &still_in_list);
if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
if (flags & I915_GEM_OBJECT_UNBIND_TEST) {
if (!i915_vm_tryopen(vm))
/* Prevent vma being freed by i915_vma_parked as we unbind */
vma = __i915_vma_get(vma);
spin_unlock(&obj->vma.lock);
if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
!i915_vma_is_active(vma))
ret = i915_vma_unbind(vma);
spin_lock(&obj->vma.lock);
list_splice_init(&still_in_list, &obj->vma.list);
spin_unlock(&obj->vma.lock);
if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
rcu_barrier(); /* flush the i915_vm_release() */
intel_runtime_pm_put(rpm, wakeref);
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
 * We manually control the domain here and pretend that it
 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
if (copy_from_user(vaddr, user_data, args->size))
drm_clflush_virt_range(vaddr, args->size);
intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
i915_gem_create(struct drm_file *file,
struct intel_memory_region *mr,
struct drm_i915_gem_object *obj;
GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
size = round_up(*size_p, mr->min_page_size);
/* For most of the ABI (e.g. mmap) we think in system pages */
GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
/* Allocate the new object */
obj = i915_gem_object_create_region(mr, size, 0);
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
i915_gem_object_put(obj);
i915_gem_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
enum intel_memory_type mem_type;
int cpp = DIV_ROUND_UP(args->bpp, 8);
format = DRM_FORMAT_C8;
format = DRM_FORMAT_RGB565;
format = DRM_FORMAT_XRGB8888;
/* have to work out size/pitch and return them */
args->pitch = ALIGN(args->width * cpp, 64);
/* align stride to page size so that we can remap */
if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
DRM_FORMAT_MOD_LINEAR))
args->pitch = ALIGN(args->pitch, 4096);
if (args->pitch < args->width)
args->size = mul_u32_u32(args->pitch, args->height);
mem_type = INTEL_MEMORY_SYSTEM;
if (HAS_LMEM(to_i915(dev)))
mem_type = INTEL_MEMORY_LOCAL;
return i915_gem_create(file,
intel_memory_region_by_type(to_i915(dev),
&args->size, &args->handle);
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_create *args = data;
i915_gem_flush_free_objects(i915);
return i915_gem_create(file,
intel_memory_region_by_type(i915,
INTEL_MEMORY_SYSTEM),
&args->size, &args->handle);
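/*
 * shmem_pread() is the per-page copy helper for the shmem pread fastpath:
 * it maps the backing page, clflushes the source range first when the
 * object is not coherent with the CPU cache, and then copies the data out
 * to the user buffer.
 */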
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
drm_clflush_virt_range(vaddr + offset, len);
ret = __copy_to_user(user_data, vaddr + offset, len);
return ret ? -EFAULT : 0;
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args)
unsigned int needs_clflush;
unsigned int idx, offset;
struct dma_fence *fence;
char __user *user_data;
ret = i915_gem_object_lock_interruptible(obj, NULL);
ret = i915_gem_object_prepare_read(obj, &needs_clflush);
i915_gem_object_unlock(obj);
fence = i915_gem_object_lock_fence(obj);
i915_gem_object_finish_access(obj);
i915_gem_object_unlock(obj);
user_data = u64_to_user_ptr(args->data_ptr);
offset = offset_in_page(args->offset);
for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
struct page *page = i915_gem_object_get_page(obj, idx);
unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
ret = shmem_pread(page, offset, length, user_data,
i915_gem_object_unlock_fence(obj, fence);
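/*
 * Note: gtt_user_read() (and ggtt_write() further below) first attempt the
 * copy through an atomic, non-sleeping WC mapping of the aperture page and
 * only fall back to a regular WC mapping, under which a user page fault can
 * be serviced, if that first copy fails.
 */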
gtt_user_read(struct io_mapping *mapping,
loff_t base, int offset,
char __user *user_data, int length)
unsigned long unwritten;
/* We can use the cpu mem copy function because this is X86. */
vaddr = io_mapping_map_atomic_wc(mapping, base);
unwritten = __copy_to_user_inatomic(user_data,
(void __force *)vaddr + offset,
io_mapping_unmap_atomic(vaddr);
vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
unwritten = copy_to_user(user_data,
(void __force *)vaddr + offset,
io_mapping_unmap(vaddr);
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pread *args)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
intel_wakeref_t wakeref;
struct drm_mm_node node;
struct dma_fence *fence;
void __user *user_data;
struct i915_vma *vma;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
vma = ERR_PTR(-ENODEV);
if (!i915_gem_object_is_tiled(obj))
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_NONBLOCK /* NOWARN */ |
node.start = i915_ggtt_offset(vma);
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
GEM_BUG_ON(!drm_mm_node_allocated(&node));
ret = i915_gem_object_lock_interruptible(obj, NULL);
ret = i915_gem_object_set_to_gtt_domain(obj, false);
i915_gem_object_unlock(obj);
fence = i915_gem_object_lock_fence(obj);
i915_gem_object_unlock(obj);
user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
/* Operation in this page
 * page_base = page offset within aperture
 * page_offset = offset within page
 * page_length = bytes to copy for this page
u32 page_base = node.start;
unsigned page_offset = offset_in_page(offset);
unsigned page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
if (drm_mm_node_allocated(&node)) {
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
page_base += offset & PAGE_MASK;
if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
remain -= page_length;
user_data += page_length;
offset += page_length;
i915_gem_object_unlock_fence(obj, fence);
if (drm_mm_node_allocated(&node)) {
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(ggtt, &node);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 * On error, the contents of *data are undefined.
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
struct drm_i915_gem_pread *args = data;
struct drm_i915_gem_object *obj;
if (!access_ok(u64_to_user_ptr(args->data_ptr),
obj = i915_gem_object_lookup(file, args->handle);
/* Bounds check source. */
if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
trace_i915_gem_object_pread(obj, args->offset, args->size);
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
ret = i915_gem_object_pin_pages(obj);
ret = i915_gem_shmem_pread(obj, args);
if (ret == -EFAULT || ret == -ENODEV)
ret = i915_gem_gtt_pread(obj, args);
i915_gem_object_unpin_pages(obj);
i915_gem_object_put(obj);
/* This is the fast write path which cannot handle
 * page faults in the source data
ggtt_write(struct io_mapping *mapping,
loff_t base, int offset,
char __user *user_data, int length)
unsigned long unwritten;
/* We can use the cpu mem copy function because this is X86. */
vaddr = io_mapping_map_atomic_wc(mapping, base);
unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
io_mapping_unmap_atomic(vaddr);
vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
unwritten = copy_from_user((void __force *)vaddr + offset,
io_mapping_unmap(vaddr);
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *args)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
intel_wakeref_t wakeref;
struct drm_mm_node node;
struct dma_fence *fence;
struct i915_vma *vma;
void __user *user_data;
if (i915_gem_object_has_struct_page(obj)) {
 * Avoid waking the device up if we can fall back, as
 * waking/resuming is very slow (worst-case 10-100 ms
 * depending on PCI sleeps and our own resume time).
 * This easily dwarfs any performance advantage from
 * using the cache bypass of indirect GGTT access.
wakeref = intel_runtime_pm_get_if_in_use(rpm);
/* No backing pages, no fallback, we must force GGTT access */
wakeref = intel_runtime_pm_get(rpm);
vma = ERR_PTR(-ENODEV);
if (!i915_gem_object_is_tiled(obj))
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_NONBLOCK /* NOWARN */ |
node.start = i915_ggtt_offset(vma);
ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
GEM_BUG_ON(!drm_mm_node_allocated(&node));
ret = i915_gem_object_lock_interruptible(obj, NULL);
ret = i915_gem_object_set_to_gtt_domain(obj, true);
i915_gem_object_unlock(obj);
fence = i915_gem_object_lock_fence(obj);
i915_gem_object_unlock(obj);
i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
/* Operation in this page
 * page_base = page offset within aperture
 * page_offset = offset within page
 * page_length = bytes to copy for this page
u32 page_base = node.start;
unsigned int page_offset = offset_in_page(offset);
unsigned int page_length = PAGE_SIZE - page_offset;
page_length = remain < page_length ? remain : page_length;
if (drm_mm_node_allocated(&node)) {
/* flush the write before we modify the GGTT */
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0);
wmb(); /* flush modifications to the GGTT (insert_page) */
page_base += offset & PAGE_MASK;
/* If we get a fault while copying data, then (presumably) our
 * source page isn't available. Return the error and we'll
 * retry in the slow path.
 * If the object is non-shmem backed, we retry again with the
 * path that handles page fault.
if (ggtt_write(&ggtt->iomap, page_base, page_offset,
user_data, page_length)) {
remain -= page_length;
user_data += page_length;
offset += page_length;
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
i915_gem_object_unlock_fence(obj, fence);
if (drm_mm_node_allocated(&node)) {
ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
remove_mappable_node(ggtt, &node);
intel_runtime_pm_put(rpm, wakeref);
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
bool needs_clflush_before,
bool needs_clflush_after)
if (needs_clflush_before)
drm_clflush_virt_range(vaddr + offset, len);
ret = __copy_from_user(vaddr + offset, user_data, len);
if (!ret && needs_clflush_after)
drm_clflush_virt_range(vaddr + offset, len);
return ret ? -EFAULT : 0;
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *args)
unsigned int partial_cacheline_write;
unsigned int needs_clflush;
unsigned int offset, idx;
struct dma_fence *fence;
void __user *user_data;
ret = i915_gem_object_lock_interruptible(obj, NULL);
ret = i915_gem_object_prepare_write(obj, &needs_clflush);
i915_gem_object_unlock(obj);
fence = i915_gem_object_lock_fence(obj);
i915_gem_object_finish_access(obj);
i915_gem_object_unlock(obj);
/* If we don't overwrite a cacheline completely we need to be
 * careful to have up-to-date data by first clflushing. Don't
 * overcomplicate things and flush the entire patch.
partial_cacheline_write = 0;
if (needs_clflush & CLFLUSH_BEFORE)
partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
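/*
 * With the mask set to clflush_size - 1, (offset | length) masked by
 * partial_cacheline_write is non-zero whenever a copy does not both start
 * and end on a cacheline boundary, i.e. exactly when the partially written
 * cachelines must be flushed before the copy.
 */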
user_data = u64_to_user_ptr(args->data_ptr);
offset = offset_in_page(args->offset);
for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
struct page *page = i915_gem_object_get_page(obj, idx);
unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
ret = shmem_pwrite(page, offset, length, user_data,
(offset | length) & partial_cacheline_write,
needs_clflush & CLFLUSH_AFTER);
i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
i915_gem_object_unlock_fence(obj, fence);
 * Writes data to the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 * On error, the contents of the buffer that were to be modified are undefined.
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
obj = i915_gem_object_lookup(file, args->handle);
/* Bounds check destination. */
if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
/* Writes not allowed into this read-only object */
if (i915_gem_object_is_readonly(obj)) {
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
if (obj->ops->pwrite)
ret = obj->ops->pwrite(obj, args);
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
MAX_SCHEDULE_TIMEOUT);
ret = i915_gem_object_pin_pages(obj);
/* We can only do the GTT pwrite on untiled buffers, as otherwise
 * it would end up going through the fenced access, and we'll get
 * different detiling behavior between reading and writing.
 * pread/pwrite currently are reading and writing from the CPU
 * perspective, requiring manual detiling by the client.
if (!i915_gem_object_has_struct_page(obj) ||
cpu_write_needs_clflush(obj))
/* Note that the gtt paths might fail with non-page-backed user
 * pointers (e.g. gtt mappings when moving data between
 * textures). Fallback to the shmem path in that case.
ret = i915_gem_gtt_pwrite_fast(obj, args);
if (ret == -EFAULT || ret == -ENOSPC) {
if (i915_gem_object_has_struct_page(obj))
ret = i915_gem_shmem_pwrite(obj, args);
ret = i915_gem_phys_pwrite(obj, args, file);
i915_gem_object_unpin_pages(obj);
i915_gem_object_put(obj);
 * Called when user space has done writes to this buffer
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
struct drm_i915_gem_sw_finish *args = data;
struct drm_i915_gem_object *obj;
obj = i915_gem_object_lookup(file, args->handle);
 * Proxy objects are barred from CPU access, so there is no
 * need to ban sw_finish as it is a nop.
/* Pinned buffers may be scanout, so flush the cache */
i915_gem_object_flush_if_display(obj);
i915_gem_object_put(obj);
void i915_gem_runtime_suspend(struct drm_i915_private *i915)
struct drm_i915_gem_object *obj, *on;
 * Only called during RPM suspend. All users of the userfault_list
 * must be holding an RPM wakeref to ensure that this can not
 * run concurrently with themselves (and use the struct_mutex for
 * protection between themselves).
list_for_each_entry_safe(obj, on,
&i915->ggtt.userfault_list, userfault_link)
__i915_gem_object_release_mmap_gtt(obj);
 * The fence will be lost when the device powers down. If any were
 * in use by hardware (i.e. they are pinned), we should not be powering
 * down! All other fences will be reacquired by the user upon waking.
for (i = 0; i < i915->ggtt.num_fences; i++) {
struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
 * Ideally we want to assert that the fence register is not
 * live at this point (i.e. that no piece of code will be
 * trying to write through fence + GTT, as that both violates
 * our tracking of activity and associated locking/barriers,
 * but also is illegal given that the hw is powered down).
 * Previously we used reg->pin_count as a "liveness" indicator.
 * That is not sufficient, and we need a more fine-grained
 * tool if we want to have a sanity check here.
GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
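/*
 * discard_ggtt_vma() unlinks a busy vma from the object's vma tree so that
 * the next i915_vma_instance() lookup creates a fresh vma for the object
 * rather than reusing this one; the discarded vma itself is left to be
 * unbound and released later once it is idle.
 */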
static void discard_ggtt_vma(struct i915_vma *vma)
struct drm_i915_gem_object *obj = vma->obj;
spin_lock(&obj->vma.lock);
if (!RB_EMPTY_NODE(&vma->obj_node)) {
rb_erase(&vma->obj_node, &obj->vma.tree);
RB_CLEAR_NODE(&vma->obj_node);
spin_unlock(&obj->vma.lock);
i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
struct i915_gem_ww_ctx *ww,
const struct i915_ggtt_view *view,
u64 size, u64 alignment, u64 flags)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = &i915->ggtt;
struct i915_vma *vma;
if (flags & PIN_MAPPABLE &&
(!view || view->type == I915_GGTT_VIEW_NORMAL)) {
 * If the required space is larger than the available
 * aperture, we will not be able to find a slot for the
 * object and unbinding the object now will be in
 * vain. Worse, doing so may cause us to ping-pong
 * the object in and out of the Global GTT and
 * waste a lot of cycles under the mutex.
if (obj->base.size > ggtt->mappable_end)
return ERR_PTR(-E2BIG);
 * If NONBLOCK is set the caller is optimistically
 * trying to cache the full object within the mappable
 * aperture, and *must* have a fallback in place for
 * situations where we cannot bind the object. We
 * can be a little more lax here and use the fallback
 * more often to avoid costly migrations of ourselves
 * and other objects within the aperture.
 * Half-the-aperture is used as a simple heuristic.
 * More interesting would be to search for a free
 * block prior to making the commitment to unbind.
 * That caters for the self-harm case, and with a
 * little more heuristics (e.g. NOFAULT, NOEVICT)
 * we could try to minimise harm to others.
if (flags & PIN_NONBLOCK &&
obj->base.size > ggtt->mappable_end / 2)
return ERR_PTR(-ENOSPC);
vma = i915_vma_instance(obj, &ggtt->vm, view);
if (i915_vma_misplaced(vma, size, alignment, flags)) {
if (flags & PIN_NONBLOCK) {
if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
return ERR_PTR(-ENOSPC);
if (flags & PIN_MAPPABLE &&
vma->fence_size > ggtt->mappable_end / 2)
return ERR_PTR(-ENOSPC);
if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)) {
discard_ggtt_vma(vma);
ret = i915_vma_unbind(vma);
return ERR_PTR(ret);
ret = i915_vma_pin_ww(vma, ww, size, alignment, flags | PIN_GLOBAL);
return ERR_PTR(ret);
if (vma->fence && !i915_gem_object_is_tiled(obj)) {
mutex_lock(&ggtt->vm.mutex);
i915_vma_revoke_fence(vma);
mutex_unlock(&ggtt->vm.mutex);
ret = i915_vma_wait_for_bind(vma);
i915_vma_unpin(vma);
return ERR_PTR(ret);
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_madvise *args = data;
struct drm_i915_gem_object *obj;
switch (args->madv) {
case I915_MADV_DONTNEED:
case I915_MADV_WILLNEED:
obj = i915_gem_object_lookup(file_priv, args->handle);
err = mutex_lock_interruptible(&obj->mm.lock);
if (i915_gem_object_has_pages(obj) &&
i915_gem_object_is_tiled(obj) &&
i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
if (obj->mm.madv == I915_MADV_WILLNEED) {
GEM_BUG_ON(!obj->mm.quirked);
__i915_gem_object_unpin_pages(obj);
obj->mm.quirked = false;
if (args->madv == I915_MADV_WILLNEED) {
GEM_BUG_ON(obj->mm.quirked);
__i915_gem_object_pin_pages(obj);
obj->mm.quirked = true;
if (obj->mm.madv != __I915_MADV_PURGED)
obj->mm.madv = args->madv;
if (i915_gem_object_has_pages(obj)) {
struct list_head *list;
if (i915_gem_object_is_shrinkable(obj)) {
unsigned long flags;
spin_lock_irqsave(&i915->mm.obj_lock, flags);
if (obj->mm.madv != I915_MADV_WILLNEED)
list = &i915->mm.purge_list;
list = &i915->mm.shrink_list;
list_move_tail(&obj->mm.link, list);
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
/* if the object is no longer attached, discard its backing storage */
if (obj->mm.madv == I915_MADV_DONTNEED &&
!i915_gem_object_has_pages(obj))
i915_gem_object_truncate(obj);
args->retained = obj->mm.madv != __I915_MADV_PURGED;
mutex_unlock(&obj->mm.lock);
i915_gem_object_put(obj);
int i915_gem_init(struct drm_i915_private *dev_priv)
/* We need to fall back to 4K pages if the host doesn't support a huge GTT. */
if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
mkwrite_device_info(dev_priv)->page_sizes =
I915_GTT_PAGE_SIZE_4K;
ret = i915_gem_init_userptr(dev_priv);
intel_uc_fetch_firmwares(&dev_priv->gt.uc);
intel_wopcm_init(&dev_priv->wopcm);
ret = i915_init_ggtt(dev_priv);
GEM_BUG_ON(ret == -EIO);
 * Despite its name, intel_init_clock_gating applies both display
 * clock gating workarounds, GT mmio workarounds and the occasional
 * GT power context workaround. Worse, sometimes it includes a context
 * register workaround which we need to apply before we record the
 * default HW state for all contexts.
 * FIXME: break up the workarounds and apply them at the right time!
intel_init_clock_gating(dev_priv);
ret = intel_gt_init(&dev_priv->gt);
 * Unwinding is complicated by the fact that we want to handle -EIO to mean
 * disable GPU submission but keep KMS alive. We want to mark the
 * HW as irreversibly wedged, but keep enough state around that the
 * driver doesn't explode during runtime.
i915_gem_drain_workqueue(dev_priv);
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
 * Allow engines or uC initialisation to fail by marking the GPU
 * as wedged. But we only want to do this when the GPU is angry;
 * for all other failures, such as an allocation failure, bail out.
if (!intel_gt_is_wedged(&dev_priv->gt)) {
i915_probe_error(dev_priv,
"Failed to initialize GPU, declaring it wedged!\n");
intel_gt_set_wedged(&dev_priv->gt);
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
i915_ggtt_resume(&dev_priv->ggtt);
intel_init_clock_gating(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
void i915_gem_driver_register(struct drm_i915_private *i915)
i915_gem_driver_register__shrinker(i915);
intel_engines_driver_register(i915);
void i915_gem_driver_unregister(struct drm_i915_private *i915)
i915_gem_driver_unregister__shrinker(i915);
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);
i915_gem_suspend_late(dev_priv);
intel_gt_driver_remove(&dev_priv->gt);
dev_priv->uabi_engines = RB_ROOT;
/* Flush any outstanding unpin_work. */
i915_gem_drain_workqueue(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
i915_gem_driver_release__contexts(dev_priv);
intel_gt_driver_release(&dev_priv->gt);
intel_wa_list_free(&dev_priv->gt_wa_list);
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_cleanup_userptr(dev_priv);
i915_gem_drain_freed_objects(dev_priv);
drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
static void i915_gem_init__mm(struct drm_i915_private *i915)
spin_lock_init(&i915->mm.obj_lock);
init_llist_head(&i915->mm.free_list);
INIT_LIST_HEAD(&i915->mm.purge_list);
INIT_LIST_HEAD(&i915->mm.shrink_list);
i915_gem_init__objects(i915);
void i915_gem_init_early(struct drm_i915_private *dev_priv)
i915_gem_init__mm(dev_priv);
i915_gem_init__contexts(dev_priv);
spin_lock_init(&dev_priv->fb_tracking.lock);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
i915_gem_drain_freed_objects(dev_priv);
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
int i915_gem_freeze(struct drm_i915_private *dev_priv)
/* Discard all purgeable objects, let userspace recover those as
 * required after resuming.
i915_gem_shrink_all(dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *i915)
struct drm_i915_gem_object *obj;
intel_wakeref_t wakeref;
 * Called just before we write the hibernation image.
 * We need to update the domain tracking to reflect that the CPU
 * will be accessing all the pages to create and restore from the
 * hibernation, and so upon restoration those pages will be in the
 * CPU domain.
 * To make sure the hibernation image contains the latest state,
 * we update that state just before writing out the image.
 * To try and reduce the hibernation image, we manually shrink
 * the objects as well, see i915_gem_freeze()
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
i915_gem_shrink(i915, -1UL, NULL, ~0);
i915_gem_drain_freed_objects(i915);
list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
i915_gem_object_lock(obj, NULL);
drm_WARN_ON(&i915->drm,
i915_gem_object_set_to_cpu_domain(obj, true));
i915_gem_object_unlock(obj);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
struct drm_i915_file_private *file_priv;
file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
file->driver_priv = file_priv;
file_priv->dev_priv = i915;
file_priv->file = file;
file_priv->bsd_engine = -1;
file_priv->hang_timestamp = jiffies;
ret = i915_gem_context_open(i915, file);
void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ww, bool intr)
ww_acquire_init(&ww->ctx, &reservation_ww_class);
INIT_LIST_HEAD(&ww->obj_list);
ww->contended = NULL;
static void i915_gem_ww_ctx_unlock_all(struct i915_gem_ww_ctx *ww)
struct drm_i915_gem_object *obj;
while ((obj = list_first_entry_or_null(&ww->obj_list, struct drm_i915_gem_object, obj_link))) {
list_del(&obj->obj_link);
i915_gem_object_unlock(obj);
void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj)
list_del(&obj->obj_link);
i915_gem_object_unlock(obj);
void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ww)
i915_gem_ww_ctx_unlock_all(ww);
WARN_ON(ww->contended);
ww_acquire_fini(&ww->ctx);
int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ww)
if (WARN_ON(!ww->contended))
i915_gem_ww_ctx_unlock_all(ww);
ret = dma_resv_lock_slow_interruptible(ww->contended->base.resv, &ww->ctx);
dma_resv_lock_slow(ww->contended->base.resv, &ww->ctx);
list_add_tail(&ww->contended->obj_link, &ww->obj_list);
ww->contended = NULL;
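/*
 * A sketch of how the ww context helpers above are meant to be used; the
 * surrounding work, here do_work(), is a hypothetical placeholder for the
 * caller's own operation on the locked object:
 *
 *	struct i915_gem_ww_ctx ww;
 *	int err;
 *
 *	i915_gem_ww_ctx_init(&ww, true);
 * retry:
 *	err = i915_gem_object_lock(obj, &ww);
 *	if (!err)
 *		err = do_work(obj);
 *	if (err == -EDEADLK) {
 *		err = i915_gem_ww_ctx_backoff(&ww);
 *		if (!err)
 *			goto retry;
 *	}
 *	i915_gem_ww_ctx_fini(&ww);
 */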
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"