/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_drv.h"
#include "intel_pm.h"
/* Carve a temporary, page-aligned node out of the mappable GGTT aperture. */
static int
insert_mappable_node(struct i915_ggtt *ggtt,
		     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	mutex_lock(&ggtt->vm.mutex);

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}
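
/*
 * Illustrative userspace usage of the aperture query (a sketch, not part
 * of this file; assumes libdrm's drmIoctl() and an open i915 fd):
 *
 *	struct drm_i915_gem_get_aperture aper = { 0 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aper) == 0)
 *		printf("total %llu, available %llu\n",
 *		       (unsigned long long)aper.aper_size,
 *		       (unsigned long long)aper.aper_available_size);
 */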
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		spin_unlock(&obj->vma.lock);

		ret = i915_vma_unbind(vma);

		spin_lock(&obj->vma.lock);
	}
	list_splice(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	return ret;
}
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}
static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		u64 *size_p,
		u32 *handle_p)
{
	struct drm_i915_gem_object *obj;
	u32 handle;
	u64 size;
	int ret;

	size = round_up(*size_p, PAGE_SIZE);
	if (size != *size_p)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create_shmem(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	*size_p = size;
	return 0;
}
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       &args->size, &args->handle);
}
/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       &args->size, &args->handle);
}
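
/*
 * Illustrative userspace usage (a sketch, not part of this file; assumes
 * libdrm's drmIoctl() and an open i915 fd). The kernel rounds the size up
 * to a page multiple and returns both the handle and the final size:
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		;	// create.handle now names a 4KiB shmem-backed object
 */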
void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
		return;

	i915_gem_chipset_flush(dev_priv);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		struct intel_uncore *uncore = &dev_priv->uncore;

		spin_lock_irq(&uncore->lock);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irq(&uncore->lock);
	}
}
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	struct dma_fence *fence;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unlock_fence(obj, fence);
	return ret;
}
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}
/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}
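
/*
 * Illustrative userspace usage (a sketch, not part of this file; "fd" and
 * "handle" stand for an open i915 fd and a previously created GEM handle):
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 */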
/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(rpm);
		if (!wakeref) {
			ret = -EFAULT;
			goto out_unlock;
		}
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(rpm);
	}

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available. Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_fb_obj_flush(obj, ORIGIN_CPU);

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(rpm, wakeref);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return ret;
}
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}
static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	struct dma_fence *fence;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire page.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	i915_gem_object_unlock_fence(obj, fence);

	return ret;
}
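
/*
 * Worked example of the partial_cacheline_write mask above, assuming a
 * 64-byte cacheline (boot_cpu_data.x86_clflush_size == 64, so the mask is
 * 63): a copy with offset == 0 and length == 4096 gives
 * (0 | 4096) & 63 == 0, i.e. only whole cachelines are overwritten and no
 * pre-flush is needed. With offset == 16, (16 | length) & 63 != 0, so the
 * destination straddles partial cachelines and must be clflushed before
 * the copy to avoid mixing stale and new data.
 */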
/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}
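
/*
 * Illustrative userspace usage (a sketch, not part of this file; mirrors
 * the pread example above, with "buf" holding the bytes to upload):
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */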
/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}
void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &i915->ggtt.userfault_list, userfault_link)
		__i915_gem_object_release_mmap(obj);

	/*
	 * The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];

		/*
		 * Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}
static int wait_for_engines(struct drm_i915_private *i915)
{
	if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
		dev_err(i915->drm.dev,
			"Failed to idle engines, declaring wedged!\n");
		GEM_TRACE_DUMP();
		i915_gem_set_wedged(i915);
		return -EIO;
	}

	return 0;
}
static long
wait_for_timelines(struct drm_i915_private *i915,
		   unsigned int flags, long timeout)
{
	struct i915_gt_timelines *gt = &i915->gt.timelines;
	struct i915_timeline *tl;

	mutex_lock(&gt->mutex);
	list_for_each_entry(tl, &gt->active_list, link) {
		struct i915_request *rq;

		rq = i915_active_request_get_unlocked(&tl->last_request);
		if (!rq)
			continue;

		mutex_unlock(&gt->mutex);

		/*
		 * "Race-to-idle".
		 *
		 * Switching to the kernel context is often used as a
		 * synchronous step prior to idling, e.g. in suspend for
		 * flushing all current operations to memory before sleeping.
		 * These we want to complete as quickly as possible to avoid
		 * prolonged stalls, so allow the gpu to boost to maximum
		 * clocks.
		 */
		if (flags & I915_WAIT_FOR_IDLE_BOOST)
			gen6_rps_boost(rq);

		timeout = i915_request_wait(rq, flags, timeout);
		i915_request_put(rq);
		if (timeout < 0)
			return timeout;

		/* restart after reacquiring the lock */
		mutex_lock(&gt->mutex);
		tl = list_entry(&gt->active_list, typeof(*tl), link);
	}
	mutex_unlock(&gt->mutex);

	return timeout;
}
int i915_gem_wait_for_idle(struct drm_i915_private *i915,
			   unsigned int flags, long timeout)
{
	GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
		  flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
		  timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
		  yesno(i915->gt.awake));

	/* If the device is asleep, we have no requests outstanding */
	if (!READ_ONCE(i915->gt.awake))
		return 0;

	timeout = wait_for_timelines(i915, flags, timeout);
	if (timeout < 0)
		return timeout;

	if (flags & I915_WAIT_LOCKED) {
		int err;

		lockdep_assert_held(&i915->drm.struct_mutex);

		err = wait_for_engines(i915);
		if (err)
			return err;

		i915_retire_requests(i915);
	}

	return 0;
}
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.vm;
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
		/* If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > dev_priv->ggtt.mappable_end)
			return ERR_PTR(-E2BIG);

		/* If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		return ERR_CAST(vma);

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			if (flags & PIN_MAPPABLE &&
			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}
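
/*
 * Illustrative kernel-internal usage of the helper above (a sketch; real
 * callers hold struct_mutex, as asserted in the function):
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 *				       PIN_MAPPABLE | PIN_NONBLOCK);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);	// e.g. fall back to the shmem paths
 *	...				// access through the aperture
 *	i915_vma_unpin(vma);
 */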
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
			obj->mm.quirked = true;
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;

	if (i915_gem_object_has_pages(obj)) {
		struct list_head *list;

		if (i915_gem_object_is_shrinkable(obj)) {
			unsigned long flags;

			spin_lock_irqsave(&i915->mm.obj_lock, flags);

			if (obj->mm.madv != I915_MADV_WILLNEED)
				list = &i915->mm.purge_list;
			else
				list = &i915->mm.shrink_list;
			list_move_tail(&obj->mm.link, list);

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
		}
	}

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
	mutex_unlock(&obj->mm.lock);

out:
	i915_gem_object_put(obj);
	return err;
}
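
/*
 * Illustrative userspace usage (a sketch, not part of this file): marking
 * a cached buffer purgeable, then later reclaiming it and checking whether
 * the backing storage survived shrinker pressure:
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		;	// contents were purged; reupload before reuse
 */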
void i915_gem_sanitize(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	GEM_TRACE("\n");

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

	/*
	 * As we have just resumed the machine and woken the device up from
	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
	 * back to defaults, recovering from whatever wedged state we left it
	 * in and so worth trying to use the device once more.
	 */
	if (i915_terminally_wedged(i915))
		i915_gem_unset_wedged(i915);

	/*
	 * If we inherit context state from the BIOS or earlier occupants
	 * of the GPU, the GPU may be in an inconsistent state when we
	 * try to take over. The only way to remove the earlier state
	 * is by resetting. However, resetting on earlier gen is tricky as
	 * it may impact the display and we are uncertain about the stability
	 * of the reset, so this could be applied to even earlier gen.
	 */
	intel_gt_sanitize(i915, false);

	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN(dev_priv, 5))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN(dev_priv, 6))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN(dev_priv, 7))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN(dev_priv, 8))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}
static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
{
	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
		init_unused_ring(dev_priv, SRB2_BASE);
		init_unused_ring(dev_priv, SRB3_BASE);
	} else if (IS_GEN(dev_priv, 2)) {
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
	} else if (IS_GEN(dev_priv, 3)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, PRB2_BASE);
	}
}
int i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
	int ret;

	dev_priv->gt.last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev_priv))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(dev_priv);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(dev_priv, "init");

	i915_gem_init_swizzling(dev_priv);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (i.e. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev_priv);

	BUG_ON(!dev_priv->kernel_context);
	ret = i915_terminally_wedged(dev_priv);
	if (ret)
		goto out;

	ret = i915_ppgtt_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	ret = intel_wopcm_init_hw(&dev_priv->wopcm);
	if (ret) {
		DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init_l3cc_table(dev_priv);

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	intel_engines_set_scheduler_caps(dev_priv);
	return 0;

out:
	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
	return ret;
}
static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct i915_gem_engines *e;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the gpu during very early sanitisation, the current
	 * register state on the GPU should reflect its defaults values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	ctx = i915_gem_context_create_kernel(i915, 0);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	e = i915_gem_context_lock_engines(ctx);

	for_each_engine(engine, i915, id) {
		struct intel_context *ce = e->engines[id];
		struct i915_request *rq;

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_active;
		}

		err = 0;
		if (rq->engine->init_context)
			err = rq->engine->init_context(rq);

		i915_request_add(rq);
		if (err)
			goto err_active;
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (!i915_gem_load_power_context(i915)) {
		err = -EIO;
		goto err_active;
	}

	for_each_engine(engine, i915, id) {
		struct intel_context *ce = e->engines[id];
		struct i915_vma *state = ce->state;
		void *vaddr;

		if (!state)
			continue;

		GEM_BUG_ON(intel_context_is_pinned(ce));

		/*
		 * As we will hold a reference to the logical state, it will
		 * not be torn down with the context, and importantly the
		 * object will hold onto its vma (making it possible for a
		 * stray GTT write to corrupt our defaults). Unmap the vma
		 * from the GTT to prevent such accidents and reclaim the
		 * space.
		 */
		err = i915_vma_unbind(state);
		if (err)
			goto err_active;

		i915_gem_object_lock(state->obj);
		err = i915_gem_object_set_to_cpu_domain(state->obj, false);
		i915_gem_object_unlock(state->obj);
		if (err)
			goto err_active;

		engine->default_state = i915_gem_object_get(state->obj);
		i915_gem_object_set_cache_coherency(engine->default_state,
						    I915_CACHE_LLC);

		/* Check we can acquire the image of the context state */
		vaddr = i915_gem_object_pin_map(engine->default_state,
						I915_MAP_FORCE_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_active;
		}

		i915_gem_object_unpin_map(engine->default_state);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		unsigned int found = intel_engines_has_context_isolation(i915);

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		for_each_engine(engine, i915, id) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((found & bit) != expected) {
				DRM_ERROR("mismatching default context state for class %d on engine %s\n",
					  engine->uabi_class, engine->name);
			}
		}
	}

out_ctx:
	i915_gem_context_unlock_engines(ctx);
	i915_gem_context_set_closed(ctx);
	i915_gem_context_put(ctx);
	return err;

err_active:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	i915_gem_set_wedged(i915);
	goto out_ctx;
}
static int
i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_stolen(i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	i915->gt.scratch = vma;
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void i915_gem_fini_scratch(struct drm_i915_private *i915)
{
	i915_vma_unpin_and_release(&i915->gt.scratch, 0);
}
static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, i915, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	return err;
}
int i915_gem_init(struct drm_i915_private *dev_priv)
{
	int ret;

	/* We need to fallback to 4K pages if host doesn't support huge gtt. */
	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		mkwrite_device_info(dev_priv)->page_sizes =
			I915_GTT_PAGE_SIZE_4K;

	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);

	i915_timelines_init(dev_priv);

	ret = i915_gem_init_userptr(dev_priv);
	if (ret)
		return ret;

	ret = intel_uc_init_misc(dev_priv);
	if (ret)
		return ret;

	ret = intel_wopcm_init(&dev_priv->wopcm);
	if (ret)
		goto err_uc_misc;

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	ret = i915_gem_init_scratch(dev_priv,
				    IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_ggtt;
	}

	ret = intel_engines_setup(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_scratch;
	}

	ret = i915_gem_contexts_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_scratch;
	}

	ret = intel_engines_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_context;
	}

	intel_init_gt_powersave(dev_priv);

	ret = intel_uc_init(dev_priv);
	if (ret)
		goto err_pm;

	ret = i915_gem_init_hw(dev_priv);
	if (ret)
		goto err_uc_init;

	/* Only when the HW is re-initialised, can we replay the requests */
	ret = intel_gt_resume(dev_priv);
	if (ret)
		goto err_init_hw;

	/*
	 * Despite its name intel_init_clock_gating applies both display
	 * clock gating workarounds; GT mmio workarounds and the occasional
	 * GT power context workaround. Worse, sometimes it includes a context
	 * register workaround which we need to apply before we record the
	 * default HW state for all contexts.
	 *
	 * FIXME: break up the workarounds and apply them at the right time!
	 */
	intel_init_clock_gating(dev_priv);

	ret = intel_engines_verify_workarounds(dev_priv);
	if (ret)
		goto err_gt;

	ret = __intel_engines_record_defaults(dev_priv);
	if (ret)
		goto err_gt;

	if (i915_inject_load_failure()) {
		ret = -ENODEV;
		goto err_gt;
	}

	if (i915_inject_load_failure()) {
		ret = -EIO;
		goto err_gt;
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;

	/*
	 * Unwinding is complicated by that we want to handle -EIO to mean
	 * disable GPU submission but keep KMS alive. We want to mark the
	 * HW as irreversibly wedged, but keep enough state around that the
	 * driver doesn't explode during runtime.
	 */
err_gt:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_set_wedged(dev_priv);
	i915_gem_suspend(dev_priv);
	i915_gem_suspend_late(dev_priv);

	i915_gem_drain_workqueue(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
err_init_hw:
	intel_uc_fini_hw(dev_priv);
err_uc_init:
	intel_uc_fini(dev_priv);
err_pm:
	if (ret != -EIO) {
		intel_cleanup_gt_powersave(dev_priv);
		intel_engines_cleanup(dev_priv);
	}
err_context:
	if (ret != -EIO)
		i915_gem_contexts_fini(dev_priv);
err_scratch:
	i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

err_uc_misc:
	intel_uc_fini_misc(dev_priv);

	if (ret != -EIO) {
		i915_gem_cleanup_userptr(dev_priv);
		i915_timelines_fini(dev_priv);
	}

	if (ret == -EIO) {
		mutex_lock(&dev_priv->drm.struct_mutex);

		/*
		 * Allow engine initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		if (!i915_reset_failed(dev_priv)) {
			i915_load_error(dev_priv,
					"Failed to initialize GPU, declaring it wedged!\n");
			i915_gem_set_wedged(dev_priv);
		}

		/* Minimal basic recovery for KMS */
		ret = i915_ggtt_enable_hw(dev_priv);
		i915_gem_restore_gtt_mappings(dev_priv);
		i915_gem_restore_fences(dev_priv);
		intel_init_clock_gating(dev_priv);

		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	i915_gem_drain_freed_objects(dev_priv);
	return ret;
}
void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
{
	GEM_BUG_ON(dev_priv->gt.awake);

	intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);

	i915_gem_suspend_late(dev_priv);
	intel_disable_gt_powersave(dev_priv);

	/* Flush any outstanding unpin_work. */
	i915_gem_drain_workqueue(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
	intel_uc_fini(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_drain_freed_objects(dev_priv);
}
void i915_gem_fini(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_engines_cleanup(dev_priv);
	i915_gem_contexts_fini(dev_priv);
	i915_gem_fini_scratch(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_wa_list_free(&dev_priv->gt_wa_list);

	intel_cleanup_gt_powersave(dev_priv);

	intel_uc_fini_misc(dev_priv);
	i915_gem_cleanup_userptr(dev_priv);
	i915_timelines_fini(dev_priv);

	i915_gem_drain_freed_objects(dev_priv);

	WARN_ON(!list_empty(&dev_priv->contexts.list));
}
void i915_gem_init_mmio(struct drm_i915_private *i915)
{
	i915_gem_sanitize(i915);
}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->mm.obj_lock);
	spin_lock_init(&i915->mm.free_lock);

	init_llist_head(&i915->mm.free_list);

	INIT_LIST_HEAD(&i915->mm.purge_list);
	INIT_LIST_HEAD(&i915->mm.shrink_list);

	i915_gem_init__objects(i915);
}
int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
	int err;

	intel_gt_pm_init(dev_priv);

	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
	INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
	spin_lock_init(&dev_priv->gt.closed_lock);

	i915_gem_init__mm(dev_priv);
	i915_gem_init__pm(dev_priv);

	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
	mutex_init(&dev_priv->gpu_error.wedge_mutex);
	init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);

	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

	spin_lock_init(&dev_priv->fb_tracking.lock);

	err = i915_gemfs_init(dev_priv);
	if (err)
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n", err);

	return 0;
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_drain_freed_objects(dev_priv);
	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
	WARN_ON(dev_priv->mm.shrink_count);

	cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);

	i915_gemfs_fini(dev_priv);
}
int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
	/* Discard all purgeable objects, let userspace recover those as
	 * required after resuming.
	 */
	i915_gem_shrink_all(dev_priv);

	return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;

	/*
	 * Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well, see i915_gem_freeze()
	 */

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	i915_gem_shrink(i915, -1UL, NULL, ~0);
	i915_gem_drain_freed_objects(i915);

	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
		i915_gem_object_lock(obj);
		WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
		i915_gem_object_unlock(obj);
	}

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_request *request;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_link)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = i915;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;
	file_priv->hang_timestamp = jiffies;

	ret = i915_gem_context_open(i915, file);
	if (ret)
		kfree(file_priv);

	return ret;
}
/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask are guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     BITS_PER_TYPE(atomic_t));

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}
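
/*
 * Illustrative call site (a sketch; the real callers live in the display
 * code, where frontbuffer_bits identifies the slots owned by the plane
 * being flipped):
 *
 *	i915_gem_track_fb(old_fb_obj, new_fb_obj, frontbuffer_bits);
 */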
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif