/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>
#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_display.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	mutex_lock(&ggtt->vm.mutex);

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}
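
/*
 * A minimal userspace sketch of driving the ioctl above, assuming an open
 * i915 fd and libdrm's drmIoctl() helper (not part of this driver):
 *
 *	struct drm_i915_gem_get_aperture aperture = {};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture) == 0)
 *		printf("aperture: %llu of %llu bytes available\n",
 *		       aperture.aper_available_size, aperture.aper_size);
 */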
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		spin_unlock(&obj->vma.lock);

		ret = i915_vma_unbind(vma);

		spin_lock(&obj->vma.lock);
	}
	list_splice(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	return ret;
}
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}
static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		u64 *size_p,
		u32 *handle_p)
{
	struct drm_i915_gem_object *obj;
	u32 handle;
	u64 size;
	int ret;

	size = round_up(*size_p, PAGE_SIZE);

	/* Allocate the new object */
	obj = i915_gem_object_create_shmem(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	*size_p = size;
	return 0;
}
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       &args->size, &args->handle);
}
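
/*
 * A worked example of the pitch maths above, assuming a 1920x1080
 * XRGB8888 dumb buffer (bpp = 32, so cpp = 4): the base pitch is
 * ALIGN(1920 * 4, 64) = 7680 bytes; only if that exceeded the plane's
 * maximum linear stride would it be rounded up further to a 4096-byte
 * multiple so the buffer can be remapped.
 */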
/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       &args->size, &args->handle);
}
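
/*
 * A minimal userspace sketch of this ioctl, assuming libdrm's drmIoctl()
 * on an open i915 fd; the kernel rounds the size up to a page multiple
 * and returns it alongside the new handle:
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		use_handle(create.handle);	(hypothetical helper)
 */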
void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
		return;

	i915_gem_chipset_flush(dev_priv);

	with_intel_runtime_pm(dev_priv, wakeref) {
		spin_lock_irq(&dev_priv->uncore.lock);

		POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));

		spin_unlock_irq(&dev_priv->uncore.lock);
	}
}
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	struct dma_fence *fence;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unlock_fence(obj, fence);
	return ret;
}
static inline unsigned long
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	wakeref = intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}
/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}
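
/*
 * A minimal userspace sketch, assuming libdrm's drmIoctl(): reading the
 * first 4096 bytes of an object into a local buffer goes through the
 * ioctl above as
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (__u64)(uintptr_t)buf,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread))
 *		handle_error(errno);	(-EFAULT, -ENOENT, -EINVAL, ...)
 */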
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline unsigned long
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(i915);
		if (!wakeref) {
			ret = -EFAULT;
			goto out_unlock;
		}
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(i915);
	}

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_fb_obj_flush(obj, ORIGIN_CPU);

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(i915, wakeref);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return ret;
}
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	struct dma_fence *fence;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
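
	/*
	 * A worked example of the mask trick above, assuming a 64-byte
	 * cacheline (x86_clflush_size == 64): partial_cacheline_write is
	 * then 63, and "(offset | length) & 63" in the loop below is
	 * non-zero exactly when either the start or the length of a copy
	 * is not cacheline aligned, i.e. when the write shares a cacheline
	 * with data we must not lose.
	 */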
	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	i915_gem_object_unlock_fence(obj, fence);

	return ret;
}
/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}
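
/*
 * A minimal userspace sketch, assuming libdrm's drmIoctl(): uploading a
 * local buffer into an object mirrors the pread example above,
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (__u64)(uintptr_t)buf,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 *
 * with the kernel side transparently picking the GTT, phys or shmem path.
 */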
/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.userfault_list, userfault_link)
		__i915_gem_object_release_mmap(obj);

	/*
	 * The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

		/*
		 * Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}
static int wait_for_engines(struct drm_i915_private *i915)
{
	if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
		dev_err(i915->drm.dev,
			"Failed to idle engines, declaring wedged!\n");
		GEM_TRACE_DUMP();
		i915_gem_set_wedged(i915);
		return -EIO;
	}

	return 0;
}
static long
wait_for_timelines(struct drm_i915_private *i915,
		   unsigned int flags, long timeout)
{
	struct i915_gt_timelines *gt = &i915->gt.timelines;
	struct i915_timeline *tl;

	mutex_lock(&gt->mutex);
	list_for_each_entry(tl, &gt->active_list, link) {
		struct i915_request *rq;

		rq = i915_active_request_get_unlocked(&tl->last_request);
		if (!rq)
			continue;

		mutex_unlock(&gt->mutex);

		/*
		 * "Race-to-idle".
		 *
		 * Switching to the kernel context is often used as a synchronous
		 * step prior to idling, e.g. in suspend for flushing all
		 * current operations to memory before sleeping. These we
		 * want to complete as quickly as possible to avoid prolonged
		 * stalls, so allow the gpu to boost to maximum clocks.
		 */
		if (flags & I915_WAIT_FOR_IDLE_BOOST)
			gen6_rps_boost(rq);

		timeout = i915_request_wait(rq, flags, timeout);
		i915_request_put(rq);
		if (timeout < 0)
			return timeout;

		/* restart after reacquiring the lock */
		mutex_lock(&gt->mutex);
		tl = list_entry(&gt->active_list, typeof(*tl), link);
	}
	mutex_unlock(&gt->mutex);

	return timeout;
}
int i915_gem_wait_for_idle(struct drm_i915_private *i915,
			   unsigned int flags, long timeout)
{
	GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
		  flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
		  timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
		  yesno(i915->gt.awake));

	/* If the device is asleep, we have no requests outstanding */
	if (!READ_ONCE(i915->gt.awake))
		return 0;

	timeout = wait_for_timelines(i915, flags, timeout);
	if (timeout < 0)
		return timeout;

	if (flags & I915_WAIT_LOCKED) {
		int err;

		lockdep_assert_held(&i915->drm.struct_mutex);

		err = wait_for_engines(i915);
		if (err)
			return err;

		i915_retire_requests(i915);
	}

	return 0;
}
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.vm;
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
		/* If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > dev_priv->ggtt.mappable_end)
			return ERR_PTR(-E2BIG);

		/* If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to do a search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}
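
	/*
	 * A worked example of the heuristic above: with a 256 MiB mappable
	 * aperture, a PIN_MAPPABLE | PIN_NONBLOCK pin of an object larger
	 * than 128 MiB fails fast with -ENOSPC so the caller takes its
	 * fallback path, rather than evicting half the aperture for a
	 * single object.
	 */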
	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			if (flags & PIN_MAPPABLE &&
			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
			obj->mm.quirked = true;
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;

	if (i915_gem_object_has_pages(obj)) {
		struct list_head *list;

		if (i915_gem_object_is_shrinkable(obj)) {
			unsigned long flags;

			spin_lock_irqsave(&i915->mm.obj_lock, flags);

			if (obj->mm.madv != I915_MADV_WILLNEED)
				list = &i915->mm.purge_list;
			else if (obj->bind_count)
				list = &i915->mm.bound_list;
			else
				list = &i915->mm.unbound_list;
			list_move_tail(&obj->mm.link, list);

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
		}
	}

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
	mutex_unlock(&obj->mm.lock);

out:
	i915_gem_object_put(obj);
	return err;
}
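
/*
 * A minimal userspace sketch, assuming libdrm's drmIoctl(): marking a
 * cached object as purgeable, and later checking on re-use whether the
 * shrinker discarded its pages in the meantime:
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		reupload_contents();	(hypothetical re-populate step)
 */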
void i915_gem_sanitize(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	GEM_TRACE("\n");

	wakeref = intel_runtime_pm_get(i915);
	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

	/*
	 * As we have just resumed the machine and woken the device up from
	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
	 * back to defaults, recovering from whatever wedged state we left it
	 * in and so worth trying to use the device once more.
	 */
	if (i915_terminally_wedged(i915))
		i915_gem_unset_wedged(i915);

	/*
	 * If we inherit context state from the BIOS or earlier occupants
	 * of the GPU, the GPU may be in an inconsistent state when we
	 * try to take over. The only way to remove the earlier state
	 * is by resetting. However, resetting on earlier gen is tricky as
	 * it may impact the display and we are uncertain about the stability
	 * of the reset, so this could be applied to even earlier gen.
	 */
	intel_gt_sanitize(i915, false);

	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	intel_runtime_pm_put(i915, wakeref);

	mutex_lock(&i915->drm.struct_mutex);
	i915_gem_contexts_lost(i915);
	mutex_unlock(&i915->drm.struct_mutex);
}
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN(dev_priv, 5))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN(dev_priv, 6))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN(dev_priv, 7))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN(dev_priv, 8))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}
static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
{
	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
		init_unused_ring(dev_priv, SRB2_BASE);
		init_unused_ring(dev_priv, SRB3_BASE);
	} else if (IS_GEN(dev_priv, 2)) {
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
	} else if (IS_GEN(dev_priv, 3)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, PRB2_BASE);
	}
}
int i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
	int ret;

	dev_priv->gt.last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev_priv))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(dev_priv);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(dev_priv, "init");

	i915_gem_init_swizzling(dev_priv);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev_priv);

	BUG_ON(!dev_priv->kernel_context);
	ret = i915_terminally_wedged(dev_priv);
	if (ret)
		goto out;

	ret = i915_ppgtt_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	ret = intel_wopcm_init_hw(&dev_priv->wopcm);
	if (ret) {
		DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init_l3cc_table(dev_priv);

	/* Only when the HW is re-initialised, can we replay the requests */
	ret = intel_engines_resume(dev_priv);
	if (ret)
		goto cleanup_uc;

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	intel_engines_set_scheduler_caps(dev_priv);
	return 0;

cleanup_uc:
	intel_uc_fini_hw(dev_priv);
out:
	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	return ret;
}
static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct i915_gem_engines *e;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the gpu during very early sanitisation, the current
	 * register state on the GPU should reflect its defaults values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	ctx = i915_gem_context_create_kernel(i915, 0);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	e = i915_gem_context_lock_engines(ctx);

	for_each_engine(engine, i915, id) {
		struct intel_context *ce = e->engines[id];
		struct i915_request *rq;

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_active;
		}

		err = 0;
		if (rq->engine->init_context)
			err = rq->engine->init_context(rq);

		i915_request_add(rq);
		if (err)
			goto err_active;
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (!i915_gem_load_power_context(i915)) {
		err = -EIO;
		goto err_active;
	}

	for_each_engine(engine, i915, id) {
		struct intel_context *ce = e->engines[id];
		struct i915_vma *state = ce->state;
		void *vaddr;

		if (!state)
			continue;

		GEM_BUG_ON(intel_context_is_pinned(ce));

		/*
		 * As we will hold a reference to the logical state, it will
		 * not be torn down with the context, and importantly the
		 * object will hold onto its vma (making it possible for a
		 * stray GTT write to corrupt our defaults). Unmap the vma
		 * from the GTT to prevent such accidents and reclaim the
		 * space.
		 */
		err = i915_vma_unbind(state);
		if (err)
			goto err_active;

		i915_gem_object_lock(state->obj);
		err = i915_gem_object_set_to_cpu_domain(state->obj, false);
		i915_gem_object_unlock(state->obj);
		if (err)
			goto err_active;

		engine->default_state = i915_gem_object_get(state->obj);
		i915_gem_object_set_cache_coherency(engine->default_state,
						    I915_CACHE_LLC);

		/* Check we can acquire the image of the context state */
		vaddr = i915_gem_object_pin_map(engine->default_state,
						I915_MAP_FORCE_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_active;
		}

		i915_gem_object_unpin_map(engine->default_state);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		unsigned int found = intel_engines_has_context_isolation(i915);

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		for_each_engine(engine, i915, id) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((found & bit) != expected) {
				DRM_ERROR("mismatching default context state for class %d on engine %s\n",
					  engine->uabi_class, engine->name);
			}
		}
	}

out_ctx:
	i915_gem_context_unlock_engines(ctx);
	i915_gem_context_set_closed(ctx);
	i915_gem_context_put(ctx);
	return err;

err_active:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	i915_gem_set_wedged(i915);
	goto out_ctx;
}
static int
i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_stolen(i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	i915->gt.scratch = vma;
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void i915_gem_fini_scratch(struct drm_i915_private *i915)
{
	i915_vma_unpin_and_release(&i915->gt.scratch, 0);
}
static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, i915, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	return err;
}
int i915_gem_init(struct drm_i915_private *dev_priv)
{
	int ret;

	/* We need to fallback to 4K pages if host doesn't support huge gtt. */
	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		mkwrite_device_info(dev_priv)->page_sizes =
			I915_GTT_PAGE_SIZE_4K;

	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);

	i915_timelines_init(dev_priv);

	ret = i915_gem_init_userptr(dev_priv);
	if (ret)
		return ret;

	ret = intel_uc_init_misc(dev_priv);
	if (ret)
		return ret;

	ret = intel_wopcm_init(&dev_priv->wopcm);
	if (ret)
		goto err_uc_misc;

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	ret = i915_gem_init_scratch(dev_priv,
				    IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_ggtt;
	}

	ret = intel_engines_setup(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	ret = i915_gem_contexts_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_scratch;
	}

	ret = intel_engines_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_context;
	}

	intel_init_gt_powersave(dev_priv);

	ret = intel_uc_init(dev_priv);
	if (ret)
		goto err_pm;

	ret = i915_gem_init_hw(dev_priv);
	if (ret)
		goto err_uc_init;

	/*
	 * Despite its name intel_init_clock_gating applies both display
	 * clock gating workarounds; GT mmio workarounds and the occasional
	 * GT power context workaround. Worse, sometimes it includes a context
	 * register workaround which we need to apply before we record the
	 * default HW state for all contexts.
	 *
	 * FIXME: break up the workarounds and apply them at the right time!
	 */
	intel_init_clock_gating(dev_priv);

	ret = intel_engines_verify_workarounds(dev_priv);
	if (ret)
		goto err_init_hw;

	ret = __intel_engines_record_defaults(dev_priv);
	if (ret)
		goto err_init_hw;

	if (i915_inject_load_failure()) {
		ret = -ENODEV;
		goto err_init_hw;
	}

	if (i915_inject_load_failure()) {
		ret = -EIO;
		goto err_init_hw;
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;

	/*
	 * Unwinding is complicated by that we want to handle -EIO to mean
	 * disable GPU submission but keep KMS alive. We want to mark the
	 * HW as irreversibly wedged, but keep enough state around that the
	 * driver doesn't explode during runtime.
	 */
err_init_hw:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_set_wedged(dev_priv);
	i915_gem_suspend(dev_priv);
	i915_gem_suspend_late(dev_priv);

	i915_gem_drain_workqueue(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
err_uc_init:
	intel_uc_fini(dev_priv);
err_pm:
	if (ret != -EIO) {
		intel_cleanup_gt_powersave(dev_priv);
		intel_engines_cleanup(dev_priv);
	}
err_context:
	if (ret != -EIO)
		i915_gem_contexts_fini(dev_priv);
err_scratch:
	i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

err_uc_misc:
	intel_uc_fini_misc(dev_priv);

	if (ret != -EIO) {
		i915_gem_cleanup_userptr(dev_priv);
		i915_timelines_fini(dev_priv);
	}

	if (ret == -EIO) {
		mutex_lock(&dev_priv->drm.struct_mutex);

		/*
		 * Allow engine initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		if (!i915_reset_failed(dev_priv)) {
			i915_load_error(dev_priv,
					"Failed to initialize GPU, declaring it wedged!\n");
			i915_gem_set_wedged(dev_priv);
		}

		/* Minimal basic recovery for KMS */
		ret = i915_ggtt_enable_hw(dev_priv);
		i915_gem_restore_gtt_mappings(dev_priv);
		i915_gem_restore_fences(dev_priv);
		intel_init_clock_gating(dev_priv);

		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	i915_gem_drain_freed_objects(dev_priv);
	return ret;
}
void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
{
	GEM_BUG_ON(dev_priv->gt.awake);

	intel_wakeref_auto_fini(&dev_priv->mm.userfault_wakeref);

	i915_gem_suspend_late(dev_priv);
	intel_disable_gt_powersave(dev_priv);

	/* Flush any outstanding unpin_work. */
	i915_gem_drain_workqueue(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
	intel_uc_fini(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_drain_freed_objects(dev_priv);
}
void i915_gem_fini(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_engines_cleanup(dev_priv);
	i915_gem_contexts_fini(dev_priv);
	i915_gem_fini_scratch(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_wa_list_free(&dev_priv->gt_wa_list);

	intel_cleanup_gt_powersave(dev_priv);

	intel_uc_fini_misc(dev_priv);
	i915_gem_cleanup_userptr(dev_priv);
	i915_timelines_fini(dev_priv);

	i915_gem_drain_freed_objects(dev_priv);

	WARN_ON(!list_empty(&dev_priv->contexts.list));
}
void i915_gem_init_mmio(struct drm_i915_private *i915)
{
	i915_gem_sanitize(i915);
}
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	int i;

	if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_GEN(dev_priv) >= 4 ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev_priv))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

		fence->i915 = dev_priv;
		fence->id = i;
		list_add_tail(&fence->link, &dev_priv->mm.fence_list);
	}
	i915_gem_restore_fences(dev_priv);

	i915_gem_detect_bit_6_swizzle(dev_priv);
}
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->mm.obj_lock);
	spin_lock_init(&i915->mm.free_lock);

	init_llist_head(&i915->mm.free_list);

	INIT_LIST_HEAD(&i915->mm.purge_list);
	INIT_LIST_HEAD(&i915->mm.unbound_list);
	INIT_LIST_HEAD(&i915->mm.bound_list);
	INIT_LIST_HEAD(&i915->mm.fence_list);

	INIT_LIST_HEAD(&i915->mm.userfault_list);
	intel_wakeref_auto_init(&i915->mm.userfault_wakeref, i915);

	i915_gem_init__objects(i915);
}
int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
	static struct lock_class_key reset_key;
	int err;

	intel_gt_pm_init(dev_priv);

	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
	INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
	spin_lock_init(&dev_priv->gt.closed_lock);
	lockdep_init_map(&dev_priv->gt.reset_lockmap,
			 "i915.reset", &reset_key, 0);

	i915_gem_init__mm(dev_priv);
	i915_gem_init__pm(dev_priv);

	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
	mutex_init(&dev_priv->gpu_error.wedge_mutex);
	init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);

	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

	spin_lock_init(&dev_priv->fb_tracking.lock);

	err = i915_gemfs_init(dev_priv);
	if (err)
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);

	return 0;
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_drain_freed_objects(dev_priv);
	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
	WARN_ON(dev_priv->mm.shrink_count);

	cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);

	i915_gemfs_fini(dev_priv);
}
int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
	/* Discard all purgeable objects, let userspace recover those as
	 * required after resuming.
	 */
	i915_gem_shrink_all(dev_priv);

	return 0;
}
int i915_gem_freeze_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&i915->mm.unbound_list,
		&i915->mm.bound_list,
		NULL
	}, **phase;

	/*
	 * Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well, see i915_gem_freeze()
	 */

	i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
	i915_gem_drain_freed_objects(i915);

	for (phase = phases; *phase; phase++) {
		list_for_each_entry(obj, *phase, mm.link) {
			i915_gem_object_lock(obj);
			WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
			i915_gem_object_unlock(obj);
		}
	}
	GEM_BUG_ON(!list_empty(&i915->mm.purge_list));

	return 0;
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_request *request;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_link)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = i915;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;
	file_priv->hang_timestamp = jiffies;

	ret = i915_gem_context_open(i915, file);
	if (ret)
		kfree(file_priv);

	return ret;
}
/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask are guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     BITS_PER_TYPE(atomic_t));

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif