/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <drm/drm_vma_manager.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
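/*
 * The two helpers below carve out and release a temporary, page-sized
 * drm_mm node in the CPU-mappable range of the global GTT. The GTT
 * pread/pwrite slow paths further down use such a node to map an object
 * through the aperture one page at a time when the whole object cannot
 * be pinned into the mappable region.
 */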
static int
insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size)
{
        int err;

        err = mutex_lock_interruptible(&ggtt->vm.mutex);
        if (err)
                return err;

        memset(node, 0, sizeof(*node));
        err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
                                          size, 0, I915_COLOR_UNEVICTABLE,
                                          0, ggtt->mappable_end,
                                          DRM_MM_INSERT_LOW);
        mutex_unlock(&ggtt->vm.mutex);
        return err;
}
static void
remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node)
{
        mutex_lock(&ggtt->vm.mutex);
        drm_mm_remove_node(node);
        mutex_unlock(&ggtt->vm.mutex);
}
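/*
 * i915_gem_get_aperture_ioctl() reports the total size of the global GTT
 * and an estimate of how much of it is currently unavailable (reserved or
 * occupied by pinned vmas), so userspace can gauge remaining aperture space.
 */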
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
{
        struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
        struct drm_i915_gem_get_aperture *args = data;
        struct i915_vma *vma;
        u64 pinned;

        if (mutex_lock_interruptible(&ggtt->vm.mutex))
                return -EINTR;

        pinned = ggtt->vm.reserved;
        list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
                if (i915_vma_is_pinned(vma))
                        pinned += vma->node.size;

        mutex_unlock(&ggtt->vm.mutex);

        args->aper_size = ggtt->vm.total;
        args->aper_available_size = args->aper_size - pinned;

        return 0;
}
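/*
 * i915_gem_object_unbind() detaches an object from every address space it
 * is bound into. The I915_GEM_OBJECT_UNBIND_* flags control how aggressive
 * the unbind is, e.g. whether still-active vmas are unbound as well and
 * whether a pending vm release is flushed before retrying.
 */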
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
                           unsigned long flags)
{
        struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm;
        LIST_HEAD(still_in_list);
        intel_wakeref_t wakeref;
        struct i915_vma *vma;
        int ret;

        if (!atomic_read(&obj->bind_count))
                return 0;

        /*
         * As some machines use ACPI to handle runtime-resume callbacks, and
         * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex
         * as they are required by the shrinker. Ergo, we wake the device up
         * first just in case.
         */
        wakeref = intel_runtime_pm_get(rpm);

try_again:
        ret = 0;
        spin_lock(&obj->vma.lock);
        while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
                                                       struct i915_vma,
                                                       obj_link))) {
                struct i915_address_space *vm = vma->vm;

                list_move_tail(&vma->obj_link, &still_in_list);
                if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK))
                        continue;

                ret = -EAGAIN;
                if (!i915_vm_tryopen(vm))
                        break;

                /* Prevent vma being freed by i915_vma_parked as we unbind */
                vma = __i915_vma_get(vma);
                spin_unlock(&obj->vma.lock);

                if (vma) {
                        ret = -EBUSY;
                        if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE ||
                            !i915_vma_is_active(vma))
                                ret = i915_vma_unbind(vma);

                        __i915_vma_put(vma);
                }

                i915_vm_close(vm);
                spin_lock(&obj->vma.lock);
        }
        list_splice_init(&still_in_list, &obj->vma.list);
        spin_unlock(&obj->vma.lock);

        if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_BARRIER) {
                rcu_barrier(); /* flush the i915_vm_release() */
                goto try_again;
        }

        intel_runtime_pm_put(rpm, wakeref);

        return ret;
}
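/*
 * i915_gem_phys_pwrite() is the pwrite backend for objects using the "phys"
 * backing store: the user data is copied straight into the object's kernel
 * mapping, clflushed and pushed out with a chipset flush, with frontbuffer
 * invalidate/flush bracketing the write.
 */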
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file)
{
        void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
        char __user *user_data = u64_to_user_ptr(args->data_ptr);

        /*
         * We manually control the domain here and pretend that it
         * remains coherent i.e. in the GTT domain, like shmem_pwrite.
         */
        i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
        if (copy_from_user(vaddr, user_data, args->size))
                return -EFAULT;

        drm_clflush_virt_range(vaddr, args->size);
        intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
        return 0;
}
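/*
 * i915_gem_create() is the common backend for the create and dumb-create
 * ioctls: it rounds the requested size up to the memory region's minimum
 * page size, allocates an object from that region and returns a new handle
 * (plus the rounded-up size) to the caller.
 */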
static int
i915_gem_create(struct drm_file *file,
                struct intel_memory_region *mr,
                u64 *size_p,
                u32 *handle_p)
{
        struct drm_i915_gem_object *obj;
        u32 handle;
        u64 size;
        int ret;

        GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
        size = round_up(*size_p, mr->min_page_size);
        if (size == 0)
                return -EINVAL;

        /* For most of the ABI (e.g. mmap) we think in system pages */
        GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

        /* Allocate the new object */
        obj = i915_gem_object_create_region(mr, size, 0);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
        i915_gem_object_put(obj);
        if (ret)
                return ret;

        *handle_p = handle;
        *size_p = size;
        return 0;
}
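/*
 * i915_gem_dumb_create() backs the KMS dumb-buffer interface: it computes a
 * pitch and size for the requested geometry, padding the stride to 4K once
 * it exceeds what the display planes can remap linearly, and then allocates
 * the buffer via i915_gem_create() from local memory when available,
 * otherwise from system memory.
 */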
int
i915_gem_dumb_create(struct drm_file *file,
                     struct drm_device *dev,
                     struct drm_mode_create_dumb *args)
{
        enum intel_memory_type mem_type;
        int cpp = DIV_ROUND_UP(args->bpp, 8);
        u32 format;

        switch (cpp) {
        case 1:
                format = DRM_FORMAT_C8;
                break;
        case 2:
                format = DRM_FORMAT_RGB565;
                break;
        case 4:
                format = DRM_FORMAT_XRGB8888;
                break;
        default:
                return -EINVAL;
        }

        /* have to work out size/pitch and return them */
        args->pitch = ALIGN(args->width * cpp, 64);

        /* align stride to page size so that we can remap */
        if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
                                                    DRM_FORMAT_MOD_LINEAR))
                args->pitch = ALIGN(args->pitch, 4096);

        if (args->pitch < args->width)
                return -EINVAL;

        args->size = mul_u32_u32(args->pitch, args->height);

        mem_type = INTEL_MEMORY_SYSTEM;
        if (HAS_LMEM(to_i915(dev)))
                mem_type = INTEL_MEMORY_LOCAL;

        return i915_gem_create(file,
                               intel_memory_region_by_type(to_i915(dev),
                                                           mem_type),
                               &args->size, &args->handle);
}
/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_create *args = data;

        i915_gem_flush_free_objects(i915);

        return i915_gem_create(file,
                               intel_memory_region_by_type(i915,
                                                           INTEL_MEMORY_SYSTEM),
                               &args->size, &args->handle);
}
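/*
 * The shmem pread path below copies the object out to userspace one page
 * at a time through a kernel mapping, clflushing each page first when the
 * object is not coherent with the CPU cache.
 */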
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
            bool needs_clflush)
{
        char *vaddr = kmap(page);
        int ret;

        if (needs_clflush)
                drm_clflush_virt_range(vaddr + offset, len);
        ret = __copy_to_user(user_data, vaddr + offset, len);
        kunmap(page);

        return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pread *args)
{
        unsigned int needs_clflush;
        unsigned int idx, offset;
        struct dma_fence *fence;
        char __user *user_data;
        u64 remain;
        int ret;

        ret = i915_gem_object_prepare_read(obj, &needs_clflush);
        if (ret)
                return ret;

        fence = i915_gem_object_lock_fence(obj);
        i915_gem_object_finish_access(obj);
        if (!fence)
                return -ENOMEM;

        remain = args->size;
        user_data = u64_to_user_ptr(args->data_ptr);
        offset = offset_in_page(args->offset);
        for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
                struct page *page = i915_gem_object_get_page(obj, idx);
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

                ret = shmem_pread(page, offset, length, user_data,
                                  needs_clflush);
                if (ret)
                        break;

                remain -= length;
                user_data += length;
                offset = 0;
        }

        i915_gem_object_unlock_fence(obj, fence);
        return ret;
}
static inline bool
gtt_user_read(struct io_mapping *mapping,
              loff_t base, int offset,
              char __user *user_data, int length)
{
        void __iomem *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        vaddr = io_mapping_map_atomic_wc(mapping, base);
        unwritten = __copy_to_user_inatomic(user_data,
                                            (void __force *)vaddr + offset,
                                            length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
                vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
                unwritten = copy_to_user(user_data,
                                         (void __force *)vaddr + offset,
                                         length);
                io_mapping_unmap(vaddr);
        }

        return unwritten;
}
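/*
 * i915_gem_gtt_pread() reads through the GGTT aperture instead of the shmem
 * mapping. The object is pinned into the mappable region when possible;
 * otherwise a single spare GGTT page (from insert_mappable_node) is rebound
 * to each page of the object in turn while the copy proceeds.
 */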
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
                   const struct drm_i915_gem_pread *args)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = &i915->ggtt;
        intel_wakeref_t wakeref;
        struct drm_mm_node node;
        struct dma_fence *fence;
        void __user *user_data;
        struct i915_vma *vma;
        u64 remain, offset;
        int ret;

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);
        vma = ERR_PTR(-ENODEV);
        if (!i915_gem_object_is_tiled(obj))
                vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                               PIN_MAPPABLE |
                                               PIN_NONBLOCK /* NOWARN */ |
                                               PIN_NOEVICT);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
                node.flags = 0;
        } else {
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
                        goto out_rpm;
                GEM_BUG_ON(!drm_mm_node_allocated(&node));
        }

        ret = i915_gem_object_lock_interruptible(obj);
        if (ret)
                goto out_unpin;

        ret = i915_gem_object_set_to_gtt_domain(obj, false);
        if (ret) {
                i915_gem_object_unlock(obj);
                goto out_unpin;
        }

        fence = i915_gem_object_lock_fence(obj);
        i915_gem_object_unlock(obj);
        if (!fence) {
                ret = -ENOMEM;
                goto out_unpin;
        }

        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
        offset = args->offset;

        while (remain > 0) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned page_offset = offset_in_page(offset);
                unsigned page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (drm_mm_node_allocated(&node)) {
                        ggtt->vm.insert_page(&ggtt->vm,
                                             i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                             node.start, I915_CACHE_NONE, 0);
                } else {
                        page_base += offset & PAGE_MASK;
                }

                if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
                                  user_data, page_length)) {
                        ret = -EFAULT;
                        break;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        i915_gem_object_unlock_fence(obj, fence);
out_unpin:
        if (drm_mm_node_allocated(&node)) {
                ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
                remove_mappable_node(ggtt, &node);
        } else {
                i915_vma_unpin(vma);
        }
out_rpm:
        intel_runtime_pm_put(&i915->runtime_pm, wakeref);
        return ret;
}
/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_pread *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        if (args->size == 0)
                return 0;

        if (!access_ok(u64_to_user_ptr(args->data_ptr),
                       args->size))
                return -EFAULT;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* Bounds check source. */
        if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
                ret = -EINVAL;
                goto out;
        }

        trace_i915_gem_object_pread(obj, args->offset, args->size);

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto out;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto out;

        ret = i915_gem_shmem_pread(obj, args);
        if (ret == -EFAULT || ret == -ENODEV)
                ret = i915_gem_gtt_pread(obj, args);

        i915_gem_object_unpin_pages(obj);
out:
        i915_gem_object_put(obj);
        return ret;
}
/* This is the fast write path which cannot handle
 * page faults in the source data
 */
static inline bool
ggtt_write(struct io_mapping *mapping,
           loff_t base, int offset,
           char __user *user_data, int length)
{
        void __iomem *vaddr;
        unsigned long unwritten;

        /* We can use the cpu mem copy function because this is X86. */
        vaddr = io_mapping_map_atomic_wc(mapping, base);
        unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
                vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
                unwritten = copy_from_user((void __force *)vaddr + offset,
                                           user_data, length);
                io_mapping_unmap(vaddr);
        }

        return unwritten;
}
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
                         const struct drm_i915_gem_pwrite *args)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct intel_runtime_pm *rpm = &i915->runtime_pm;
        intel_wakeref_t wakeref;
        struct drm_mm_node node;
        struct dma_fence *fence;
        struct i915_vma *vma;
        u64 remain, offset;
        void __user *user_data;
        int ret;

        if (i915_gem_object_has_struct_page(obj)) {
                /*
                 * Avoid waking the device up if we can fall back, as
                 * waking/resuming is very slow (worst-case 10-100 ms
                 * depending on PCI sleeps and our own resume time).
                 * This easily dwarfs any performance advantage from
                 * using the cache bypass of indirect GGTT access.
                 */
                wakeref = intel_runtime_pm_get_if_in_use(rpm);
                if (!wakeref)
                        return -EFAULT;
        } else {
                /* No backing pages, no fallback, we must force GGTT access */
                wakeref = intel_runtime_pm_get(rpm);
        }

        vma = ERR_PTR(-ENODEV);
        if (!i915_gem_object_is_tiled(obj))
                vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                               PIN_MAPPABLE |
                                               PIN_NONBLOCK /* NOWARN */ |
                                               PIN_NOEVICT);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
                node.flags = 0;
        } else {
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
                        goto out_rpm;
                GEM_BUG_ON(!drm_mm_node_allocated(&node));
        }

        ret = i915_gem_object_lock_interruptible(obj);
        if (ret)
                goto out_unpin;

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret) {
                i915_gem_object_unlock(obj);
                goto out_unpin;
        }

        fence = i915_gem_object_lock_fence(obj);
        i915_gem_object_unlock(obj);
        if (!fence) {
                ret = -ENOMEM;
                goto out_unpin;
        }

        i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

        user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
        remain = args->size;
        while (remain) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
                u32 page_base = node.start;
                unsigned int page_offset = offset_in_page(offset);
                unsigned int page_length = PAGE_SIZE - page_offset;
                page_length = remain < page_length ? remain : page_length;
                if (drm_mm_node_allocated(&node)) {
                        /* flush the write before we modify the GGTT */
                        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
                        ggtt->vm.insert_page(&ggtt->vm,
                                             i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
                                             node.start, I915_CACHE_NONE, 0);
                        wmb(); /* flush modifications to the GGTT (insert_page) */
                } else {
                        page_base += offset & PAGE_MASK;
                }
                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available. Return the error and we'll
                 * retry in the slow path.
                 * If the object is non-shmem backed, we retry again with the
                 * path that handles page fault.
                 */
                if (ggtt_write(&ggtt->iomap, page_base, page_offset,
                               user_data, page_length)) {
                        ret = -EFAULT;
                        break;
                }

                remain -= page_length;
                user_data += page_length;
                offset += page_length;
        }

        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);

        i915_gem_object_unlock_fence(obj, fence);
out_unpin:
        if (drm_mm_node_allocated(&node)) {
                ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
                remove_mappable_node(ggtt, &node);
        } else {
                i915_vma_unpin(vma);
        }
out_rpm:
        intel_runtime_pm_put(rpm, wakeref);
        return ret;
}
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
             bool needs_clflush_before,
             bool needs_clflush_after)
{
        char *vaddr;
        int ret;

        vaddr = kmap(page);

        if (needs_clflush_before)
                drm_clflush_virt_range(vaddr + offset, len);

        ret = __copy_from_user(vaddr + offset, user_data, len);
        if (!ret && needs_clflush_after)
                drm_clflush_virt_range(vaddr + offset, len);

        kunmap(page);

        return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
                      const struct drm_i915_gem_pwrite *args)
{
        unsigned int partial_cacheline_write;
        unsigned int needs_clflush;
        unsigned int offset, idx;
        struct dma_fence *fence;
        void __user *user_data;
        u64 remain;
        int ret;

        ret = i915_gem_object_prepare_write(obj, &needs_clflush);
        if (ret)
                return ret;

        fence = i915_gem_object_lock_fence(obj);
        i915_gem_object_finish_access(obj);
        if (!fence)
                return -ENOMEM;

        /* If we don't overwrite a cacheline completely we need to be
         * careful to have up-to-date data by first clflushing. Don't
         * overcomplicate things and flush the entire patch.
         */
        partial_cacheline_write = 0;
        if (needs_clflush & CLFLUSH_BEFORE)
                partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
        offset = offset_in_page(args->offset);
        for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
                struct page *page = i915_gem_object_get_page(obj, idx);
                unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

                ret = shmem_pwrite(page, offset, length, user_data,
                                   (offset | length) & partial_cacheline_write,
                                   needs_clflush & CLFLUSH_AFTER);
                if (ret)
                        break;

                remain -= length;
                user_data += length;
                offset = 0;
        }

        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
        i915_gem_object_unlock_fence(obj, fence);

        return ret;
}
/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
{
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;

        if (args->size == 0)
                return 0;

        if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
                return -EFAULT;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /* Bounds check destination. */
        if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
                ret = -EINVAL;
                goto err;
        }

        /* Writes not allowed into this read-only object */
        if (i915_gem_object_is_readonly(obj)) {
                ret = -EINVAL;
                goto err;
        }

        trace_i915_gem_object_pwrite(obj, args->offset, args->size);

        ret = -ENODEV;
        if (obj->ops->pwrite)
                ret = obj->ops->pwrite(obj, args);
        if (ret != -ENODEV)
                goto err;

        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_ALL,
                                   MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto err;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err;

        ret = -EFAULT;
        /* We can only do the GTT pwrite on untiled buffers, as otherwise
         * it would end up going through the fenced access, and we'll get
         * different detiling behavior between reading and writing.
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
        if (!i915_gem_object_has_struct_page(obj) ||
            cpu_write_needs_clflush(obj))
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
                 * textures). Fall back to the shmem path in that case.
                 */
                ret = i915_gem_gtt_pwrite_fast(obj, args);

        if (ret == -EFAULT || ret == -ENOSPC) {
                if (i915_gem_object_has_struct_page(obj))
                        ret = i915_gem_shmem_pwrite(obj, args);
                else
                        ret = i915_gem_phys_pwrite(obj, args, file);
        }

        i915_gem_object_unpin_pages(obj);
err:
        i915_gem_object_put(obj);
        return ret;
}
/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file)
{
        struct drm_i915_gem_sw_finish *args = data;
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        /*
         * Proxy objects are barred from CPU access, so there is no
         * need to ban sw_finish as it is a nop.
         */

        /* Pinned buffers may be scanout, so flush the cache */
        i915_gem_object_flush_if_display(obj);
        i915_gem_object_put(obj);

        return 0;
}
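/*
 * i915_gem_runtime_suspend() runs as the device autosuspends: it revokes
 * any userspace GTT mmaps so that a later access faults (and thereby wakes
 * the device again), and marks the fence registers as dirty since their
 * contents will be lost while powered down.
 */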
void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
        struct drm_i915_gem_object *obj, *on;
        int i;

        /*
         * Only called during RPM suspend. All users of the userfault_list
         * must be holding an RPM wakeref to ensure that this can not
         * run concurrently with themselves (and use the struct_mutex for
         * protection between themselves).
         */
        list_for_each_entry_safe(obj, on,
                                 &i915->ggtt.userfault_list, userfault_link)
                __i915_gem_object_release_mmap_gtt(obj);

        /*
         * The fence will be lost when the device powers down. If any were
         * in use by hardware (i.e. they are pinned), we should not be powering
         * down! All other fences will be reacquired by the user upon waking.
         */
        for (i = 0; i < i915->ggtt.num_fences; i++) {
                struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];

                /*
                 * Ideally we want to assert that the fence register is not
                 * live at this point (i.e. that no piece of code will be
                 * trying to write through fence + GTT, as that both violates
                 * our tracking of activity and associated locking/barriers,
                 * but also is illegal given that the hw is powered down).
                 *
                 * Previously we used reg->pin_count as a "liveness" indicator.
                 * That is not sufficient, and we need a more fine-grained
                 * tool if we want to have a sanity check here.
                 */

                if (!reg->vma)
                        continue;

                GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
                reg->dirty = true;
        }
}
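/*
 * i915_gem_object_ggtt_pin() looks up (or creates) a vma for the object in
 * the global GTT and pins it there. For PIN_MAPPABLE/PIN_NONBLOCK callers it
 * applies the aperture-size heuristics documented below before deciding
 * whether to evict or to report -ENOSPC and let the caller fall back.
 */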
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
                         const struct i915_ggtt_view *view,
                         u64 size,
                         u64 alignment,
                         u64 flags)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = &i915->ggtt;
        struct i915_vma *vma;
        int ret;

        if (flags & PIN_MAPPABLE &&
            (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
                /*
                 * If the required space is larger than the available
                 * aperture, we will not be able to find a slot for the
                 * object and unbinding the object now will be in
                 * vain. Worse, doing so may cause us to ping-pong
                 * the object in and out of the Global GTT and
                 * waste a lot of cycles under the mutex.
                 */
                if (obj->base.size > ggtt->mappable_end)
                        return ERR_PTR(-E2BIG);

                /*
                 * If NONBLOCK is set the caller is optimistically
                 * trying to cache the full object within the mappable
                 * aperture, and *must* have a fallback in place for
                 * situations where we cannot bind the object. We
                 * can be a little more lax here and use the fallback
                 * more often to avoid costly migrations of ourselves
                 * and other objects within the aperture.
                 *
                 * Half-the-aperture is used as a simple heuristic.
                 * More interesting would be to search for a free
                 * block prior to making the commitment to unbind.
                 * That caters for the self-harm case, and with a
                 * little more heuristics (e.g. NOFAULT, NOEVICT)
                 * we could try to minimise harm to others.
                 */
                if (flags & PIN_NONBLOCK &&
                    obj->base.size > ggtt->mappable_end / 2)
                        return ERR_PTR(-ENOSPC);
        }

        vma = i915_vma_instance(obj, &ggtt->vm, view);
        if (IS_ERR(vma))
                return vma;

        if (i915_vma_misplaced(vma, size, alignment, flags)) {
                if (flags & PIN_NONBLOCK) {
                        if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
                                return ERR_PTR(-ENOSPC);

                        if (flags & PIN_MAPPABLE &&
                            vma->fence_size > ggtt->mappable_end / 2)
                                return ERR_PTR(-ENOSPC);
                }

                ret = i915_vma_unbind(vma);
                if (ret)
                        return ERR_PTR(ret);
        }

        if (vma->fence && !i915_gem_object_is_tiled(obj)) {
                mutex_lock(&ggtt->vm.mutex);
                ret = i915_vma_revoke_fence(vma);
                mutex_unlock(&ggtt->vm.mutex);
                if (ret)
                        return ERR_PTR(ret);
        }

        ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
        if (ret)
                return ERR_PTR(ret);

        ret = i915_vma_wait_for_bind(vma);
        if (ret) {
                i915_vma_unpin(vma);
                return ERR_PTR(ret);
        }

        return vma;
}
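/*
 * i915_gem_madvise_ioctl() lets userspace mark an object's backing storage
 * as WILLNEED or DONTNEED. DONTNEED objects are moved onto the purge list so
 * the shrinker may discard their pages under memory pressure; whether the
 * storage has already been purged is reported back via args->retained.
 */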
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_madvise *args = data;
        struct drm_i915_gem_object *obj;
        int err;

        switch (args->madv) {
        case I915_MADV_DONTNEED:
        case I915_MADV_WILLNEED:
                break;
        default:
                return -EINVAL;
        }

        obj = i915_gem_object_lookup(file_priv, args->handle);
        if (!obj)
                return -ENOENT;

        err = mutex_lock_interruptible(&obj->mm.lock);
        if (err)
                goto out;

        if (i915_gem_object_has_pages(obj) &&
            i915_gem_object_is_tiled(obj) &&
            i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                if (obj->mm.madv == I915_MADV_WILLNEED) {
                        GEM_BUG_ON(!obj->mm.quirked);
                        __i915_gem_object_unpin_pages(obj);
                        obj->mm.quirked = false;
                }
                if (args->madv == I915_MADV_WILLNEED) {
                        GEM_BUG_ON(obj->mm.quirked);
                        __i915_gem_object_pin_pages(obj);
                        obj->mm.quirked = true;
                }
        }

        if (obj->mm.madv != __I915_MADV_PURGED)
                obj->mm.madv = args->madv;

        if (i915_gem_object_has_pages(obj)) {
                struct list_head *list;

                if (i915_gem_object_is_shrinkable(obj)) {
                        unsigned long flags;

                        spin_lock_irqsave(&i915->mm.obj_lock, flags);

                        if (obj->mm.madv != I915_MADV_WILLNEED)
                                list = &i915->mm.purge_list;
                        else
                                list = &i915->mm.shrink_list;
                        list_move_tail(&obj->mm.link, list);

                        spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
                }
        }

        /* if the object is no longer attached, discard its backing storage */
        if (obj->mm.madv == I915_MADV_DONTNEED &&
            !i915_gem_object_has_pages(obj))
                i915_gem_object_truncate(obj);

        args->retained = obj->mm.madv != __I915_MADV_PURGED;
        mutex_unlock(&obj->mm.lock);

out:
        i915_gem_object_put(obj);
        return err;
}
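/*
 * i915_gem_init() brings up the GEM side of the driver: userptr support,
 * firmware fetching, the global GTT and the GT itself. On failure it
 * distinguishes a wedged GPU (-EIO, keep KMS alive with minimal recovery)
 * from ordinary errors, which are unwound and propagated.
 */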
int i915_gem_init(struct drm_i915_private *dev_priv)
{
        int ret;

        /* We need to fall back to 4K pages if host doesn't support huge gtt. */
        if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
                mkwrite_device_info(dev_priv)->page_sizes =
                        I915_GTT_PAGE_SIZE_4K;

        ret = i915_gem_init_userptr(dev_priv);
        if (ret)
                return ret;

        intel_uc_fetch_firmwares(&dev_priv->gt.uc);
        intel_wopcm_init(&dev_priv->wopcm);

        ret = i915_init_ggtt(dev_priv);
        if (ret) {
                GEM_BUG_ON(ret == -EIO);
                goto err_unlock;
        }

        /*
         * Despite its name intel_init_clock_gating applies both display
         * clock gating workarounds; GT mmio workarounds and the occasional
         * GT power context workaround. Worse, sometimes it includes a context
         * register workaround which we need to apply before we record the
         * default HW state for all contexts.
         *
         * FIXME: break up the workarounds and apply them at the right time!
         */
        intel_init_clock_gating(dev_priv);

        ret = intel_gt_init(&dev_priv->gt);
        if (ret)
                goto err_unlock;

        return 0;

        /*
         * Unwinding is complicated by the fact that we want to handle -EIO
         * to mean disable GPU submission but keep KMS alive. We want to mark
         * the HW as irreversibly wedged, but keep enough state around that
         * the driver doesn't explode during runtime.
         */
err_unlock:
        i915_gem_drain_workqueue(dev_priv);

        if (ret != -EIO) {
                intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
                i915_gem_cleanup_userptr(dev_priv);
        }

        if (ret == -EIO) {
                /*
                 * Allow engines or uC initialisation to fail by marking the GPU
                 * as wedged. But we only want to do this when the GPU is angry,
                 * for all other failure, such as an allocation failure, bail.
                 */
                if (!intel_gt_is_wedged(&dev_priv->gt)) {
                        i915_probe_error(dev_priv,
                                         "Failed to initialize GPU, declaring it wedged!\n");
                        intel_gt_set_wedged(&dev_priv->gt);
                }

                /* Minimal basic recovery for KMS */
                ret = i915_ggtt_enable_hw(dev_priv);
                i915_ggtt_resume(&dev_priv->ggtt);
                i915_gem_restore_fences(&dev_priv->ggtt);
                intel_init_clock_gating(dev_priv);
        }

        i915_gem_drain_freed_objects(dev_priv);
        return ret;
}
void i915_gem_driver_register(struct drm_i915_private *i915)
{
        i915_gem_driver_register__shrinker(i915);

        intel_engines_driver_register(i915);
}

void i915_gem_driver_unregister(struct drm_i915_private *i915)
{
        i915_gem_driver_unregister__shrinker(i915);
}
void i915_gem_driver_remove(struct drm_i915_private *dev_priv)
{
        intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);

        i915_gem_suspend_late(dev_priv);
        intel_gt_driver_remove(&dev_priv->gt);
        dev_priv->uabi_engines = RB_ROOT;

        /* Flush any outstanding unpin_work. */
        i915_gem_drain_workqueue(dev_priv);

        i915_gem_drain_freed_objects(dev_priv);
}
void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
        i915_gem_driver_release__contexts(dev_priv);

        intel_gt_driver_release(&dev_priv->gt);

        intel_wa_list_free(&dev_priv->gt_wa_list);

        intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
        i915_gem_cleanup_userptr(dev_priv);

        i915_gem_drain_freed_objects(dev_priv);

        drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
        spin_lock_init(&i915->mm.obj_lock);

        init_llist_head(&i915->mm.free_list);

        INIT_LIST_HEAD(&i915->mm.purge_list);
        INIT_LIST_HEAD(&i915->mm.shrink_list);

        i915_gem_init__objects(i915);
}
void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
        i915_gem_init__mm(dev_priv);
        i915_gem_init__contexts(dev_priv);

        spin_lock_init(&dev_priv->fb_tracking.lock);
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
        i915_gem_drain_freed_objects(dev_priv);
        GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
        GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
        drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}
int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
        /* Discard all purgeable objects, let userspace recover those as
         * required after resuming.
         */
        i915_gem_shrink_all(dev_priv);

        return 0;
}
int i915_gem_freeze_late(struct drm_i915_private *i915)
{
        struct drm_i915_gem_object *obj;
        intel_wakeref_t wakeref;

        /*
         * Called just before we write the hibernation image.
         *
         * We need to update the domain tracking to reflect that the CPU
         * will be accessing all the pages to create and restore from the
         * hibernation, and so upon restoration those pages will be in the
         * CPU domain.
         *
         * To make sure the hibernation image contains the latest state,
         * we update that state just before writing out the image.
         *
         * To try and reduce the hibernation image, we manually shrink
         * the objects as well, see i915_gem_freeze()
         */

        wakeref = intel_runtime_pm_get(&i915->runtime_pm);

        i915_gem_shrink(i915, -1UL, NULL, ~0);
        i915_gem_drain_freed_objects(i915);

        list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
                i915_gem_object_lock(obj);
                drm_WARN_ON(&i915->drm,
                            i915_gem_object_set_to_cpu_domain(obj, true));
                i915_gem_object_unlock(obj);
        }

        intel_runtime_pm_put(&i915->runtime_pm, wakeref);

        return 0;
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct i915_request *request;

        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
         */
        spin_lock(&file_priv->mm.lock);
        list_for_each_entry(request, &file_priv->mm.request_list, client_link)
                request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
}
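/*
 * i915_gem_open() allocates the per-client state (drm_i915_file_private)
 * when a new file handle is opened, and sets up its request tracking and
 * context state.
 */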
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv;
        int ret;

        file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv)
                return -ENOMEM;

        file->driver_priv = file_priv;
        file_priv->dev_priv = i915;
        file_priv->file = file;

        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);

        file_priv->bsd_engine = -1;
        file_priv->hang_timestamp = jiffies;

        ret = i915_gem_context_open(i915, file);
        if (ret)
                kfree(file_priv);

        return ret;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif