/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
28 #include <drm/drm_vma_manager.h>
29 #include <drm/drm_pci.h>
30 #include <drm/i915_drm.h>
31 #include <linux/dma-fence-array.h>
32 #include <linux/kthread.h>
33 #include <linux/reservation.h>
34 #include <linux/shmem_fs.h>
35 #include <linux/slab.h>
36 #include <linux/stop_machine.h>
37 #include <linux/swap.h>
38 #include <linux/pci.h>
39 #include <linux/dma-buf.h>
40 #include <linux/mman.h>
43 #include "i915_gem_clflush.h"
44 #include "i915_gemfs.h"
45 #include "i915_globals.h"
46 #include "i915_reset.h"
47 #include "i915_trace.h"
48 #include "i915_vgpu.h"
50 #include "intel_drv.h"
51 #include "intel_frontbuffer.h"
52 #include "intel_mocs.h"
53 #include "intel_workarounds.h"
55 static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
57 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
62 if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
65 return obj->pin_global; /* currently in use by HW, keep flushed */
69 insert_mappable_node(struct i915_ggtt *ggtt,
70 struct drm_mm_node *node, u32 size)
72 memset(node, 0, sizeof(*node));
73 return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
74 size, 0, I915_COLOR_UNEVICTABLE,
75 0, ggtt->mappable_end,
80 remove_mappable_node(struct drm_mm_node *node)
82 drm_mm_remove_node(node);
85 /* some bookkeeping */
86 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
89 spin_lock(&dev_priv->mm.object_stat_lock);
90 dev_priv->mm.object_count++;
91 dev_priv->mm.object_memory += size;
92 spin_unlock(&dev_priv->mm.object_stat_lock);
95 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
98 spin_lock(&dev_priv->mm.object_stat_lock);
99 dev_priv->mm.object_count--;
100 dev_priv->mm.object_memory -= size;
101 spin_unlock(&dev_priv->mm.object_stat_lock);
104 static void __i915_gem_park(struct drm_i915_private *i915)
106 intel_wakeref_t wakeref;
110 lockdep_assert_held(&i915->drm.struct_mutex);
111 GEM_BUG_ON(i915->gt.active_requests);
112 GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
118 * Be paranoid and flush a concurrent interrupt to make sure
119 * we don't reactivate any irq tasklets after parking.
121 * FIXME: Note that even though we have waited for execlists to be idle,
122 * there may still be an in-flight interrupt even though the CSB
123 * is now empty. synchronize_irq() makes sure that a residual interrupt
124 * is completed before we continue, but it doesn't prevent the HW from
125 * raising a spurious interrupt later. To complete the shield we should
126 * coordinate disabling the CS irq with flushing the interrupts.
128 synchronize_irq(i915->drm.irq);
130 intel_engines_park(i915);
131 i915_timelines_park(i915);
133 i915_pmu_gt_parked(i915);
134 i915_vma_parked(i915);
136 wakeref = fetch_and_zero(&i915->gt.awake);
137 GEM_BUG_ON(!wakeref);
139 if (INTEL_GEN(i915) >= 6)
142 intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
147 void i915_gem_park(struct drm_i915_private *i915)
151 lockdep_assert_held(&i915->drm.struct_mutex);
152 GEM_BUG_ON(i915->gt.active_requests);
157 /* Defer the actual call to __i915_gem_park() to prevent ping-pongs */
158 mod_delayed_work(i915->wq, &i915->gt.idle_work, msecs_to_jiffies(100));
161 void i915_gem_unpark(struct drm_i915_private *i915)
165 lockdep_assert_held(&i915->drm.struct_mutex);
166 GEM_BUG_ON(!i915->gt.active_requests);
167 assert_rpm_wakelock_held(i915);
173 * It seems that the DMC likes to transition between the DC states a lot
174 * when there are no connected displays (no active power domains) during
175 * command submission.
177 * This activity has negative impact on the performance of the chip with
178 * huge latencies observed in the interrupt handler and elsewhere.
180 * Work around it by grabbing a GT IRQ power domain whilst there is any
181 * GT activity, preventing any DC state transitions.
183 i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
184 GEM_BUG_ON(!i915->gt.awake);
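	/*
	 * Note: the wakeref returned by intel_display_power_get() is stashed
	 * in i915->gt.awake here; __i915_gem_park() later fetches it with
	 * fetch_and_zero() and releases it via
	 * intel_display_power_put(POWER_DOMAIN_GT_IRQ), keeping park/unpark
	 * balanced.
	 */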
186 i915_globals_unpark();
188 intel_enable_gt_powersave(i915);
189 i915_update_gfx_val(i915);
190 if (INTEL_GEN(i915) >= 6)
192 i915_pmu_gt_unparked(i915);
194 intel_engines_unpark(i915);
196 i915_queue_hangcheck(i915);
198 queue_delayed_work(i915->wq,
199 &i915->gt.retire_work,
200 round_jiffies_up_relative(HZ));
204 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
205 struct drm_file *file)
207 struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
208 struct drm_i915_gem_get_aperture *args = data;
209 struct i915_vma *vma;
212 mutex_lock(&ggtt->vm.mutex);
214 pinned = ggtt->vm.reserved;
215 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
216 if (i915_vma_is_pinned(vma))
217 pinned += vma->node.size;
219 mutex_unlock(&ggtt->vm.mutex);
221 args->aper_size = ggtt->vm.total;
222 args->aper_available_size = args->aper_size - pinned;
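/*
 * Illustrative userspace usage of this ioctl (editor's sketch, not part of
 * the driver; assumes an open DRM fd, error handling omitted):
 *
 *	struct drm_i915_gem_get_aperture aperture = {};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
 *
 * aperture.aper_size and aperture.aper_available_size are reported in bytes.
 */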
227 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
229 struct address_space *mapping = obj->base.filp->f_mapping;
230 drm_dma_handle_t *phys;
232 struct scatterlist *sg;
237 if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
	/* Always aligning to the object size allows a single allocation
241 * to handle all possible callers, and given typical object sizes,
242 * the alignment of the buddy allocation will naturally match.
244 phys = drm_pci_alloc(obj->base.dev,
245 roundup_pow_of_two(obj->base.size),
246 roundup_pow_of_two(obj->base.size));
251 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
255 page = shmem_read_mapping_page(mapping, i);
261 src = kmap_atomic(page);
262 memcpy(vaddr, src, PAGE_SIZE);
263 drm_clflush_virt_range(vaddr, PAGE_SIZE);
270 i915_gem_chipset_flush(to_i915(obj->base.dev));
272 st = kmalloc(sizeof(*st), GFP_KERNEL);
278 if (sg_alloc_table(st, 1, GFP_KERNEL)) {
286 sg->length = obj->base.size;
288 sg_dma_address(sg) = phys->busaddr;
289 sg_dma_len(sg) = obj->base.size;
291 obj->phys_handle = phys;
293 __i915_gem_object_set_pages(obj, st, sg->length);
298 drm_pci_free(obj->base.dev, phys);
303 static void __start_cpu_write(struct drm_i915_gem_object *obj)
305 obj->read_domains = I915_GEM_DOMAIN_CPU;
306 obj->write_domain = I915_GEM_DOMAIN_CPU;
307 if (cpu_write_needs_clflush(obj))
308 obj->cache_dirty = true;
312 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
313 struct sg_table *pages,
316 GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
318 if (obj->mm.madv == I915_MADV_DONTNEED)
319 obj->mm.dirty = false;
322 (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
323 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
324 drm_clflush_sg(pages);
326 __start_cpu_write(obj);
330 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
331 struct sg_table *pages)
333 __i915_gem_object_release_shmem(obj, pages, false);
336 struct address_space *mapping = obj->base.filp->f_mapping;
337 char *vaddr = obj->phys_handle->vaddr;
340 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
344 page = shmem_read_mapping_page(mapping, i);
348 dst = kmap_atomic(page);
349 drm_clflush_virt_range(vaddr, PAGE_SIZE);
350 memcpy(dst, vaddr, PAGE_SIZE);
353 set_page_dirty(page);
354 if (obj->mm.madv == I915_MADV_WILLNEED)
355 mark_page_accessed(page);
359 obj->mm.dirty = false;
362 sg_free_table(pages);
365 drm_pci_free(obj->base.dev, obj->phys_handle);
369 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
371 i915_gem_object_unpin_pages(obj);
374 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
375 .get_pages = i915_gem_object_get_pages_phys,
376 .put_pages = i915_gem_object_put_pages_phys,
377 .release = i915_gem_object_release_phys,
380 static const struct drm_i915_gem_object_ops i915_gem_object_ops;
382 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
384 struct i915_vma *vma;
385 LIST_HEAD(still_in_list);
388 lockdep_assert_held(&obj->base.dev->struct_mutex);
	/* Closed vma are removed from the obj->vma.list - but they may
391 * still have an active binding on the object. To remove those we
392 * must wait for all rendering to complete to the object (as unbinding
393 * must anyway), and retire the requests.
395 ret = i915_gem_object_set_to_cpu_domain(obj, false);
399 spin_lock(&obj->vma.lock);
400 while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
403 list_move_tail(&vma->obj_link, &still_in_list);
404 spin_unlock(&obj->vma.lock);
406 ret = i915_vma_unbind(vma);
408 spin_lock(&obj->vma.lock);
410 list_splice(&still_in_list, &obj->vma.list);
411 spin_unlock(&obj->vma.lock);
417 i915_gem_object_wait_fence(struct dma_fence *fence,
421 struct i915_request *rq;
423 BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
425 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
428 if (!dma_fence_is_i915(fence))
429 return dma_fence_wait_timeout(fence,
430 flags & I915_WAIT_INTERRUPTIBLE,
433 rq = to_request(fence);
434 if (i915_request_completed(rq))
437 timeout = i915_request_wait(rq, flags, timeout);
440 if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
441 i915_request_retire_upto(rq);
447 i915_gem_object_wait_reservation(struct reservation_object *resv,
451 unsigned int seq = __read_seqcount_begin(&resv->seq);
452 struct dma_fence *excl;
453 bool prune_fences = false;
455 if (flags & I915_WAIT_ALL) {
456 struct dma_fence **shared;
457 unsigned int count, i;
460 ret = reservation_object_get_fences_rcu(resv,
461 &excl, &count, &shared);
465 for (i = 0; i < count; i++) {
466 timeout = i915_gem_object_wait_fence(shared[i],
471 dma_fence_put(shared[i]);
474 for (; i < count; i++)
475 dma_fence_put(shared[i]);
479 * If both shared fences and an exclusive fence exist,
480 * then by construction the shared fences must be later
481 * than the exclusive fence. If we successfully wait for
482 * all the shared fences, we know that the exclusive fence
483 * must all be signaled. If all the shared fences are
484 * signaled, we can prune the array and recover the
485 * floating references on the fences/requests.
487 prune_fences = count && timeout >= 0;
489 excl = reservation_object_get_excl_rcu(resv);
492 if (excl && timeout >= 0)
493 timeout = i915_gem_object_wait_fence(excl, flags, timeout);
498 * Opportunistically prune the fences iff we know they have *all* been
499 * signaled and that the reservation object has not been changed (i.e.
500 * no new fences have been added).
502 if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
503 if (reservation_object_trylock(resv)) {
504 if (!__read_seqcount_retry(&resv->seq, seq))
505 reservation_object_add_excl_fence(resv, NULL);
506 reservation_object_unlock(resv);
513 static void __fence_set_priority(struct dma_fence *fence,
514 const struct i915_sched_attr *attr)
516 struct i915_request *rq;
517 struct intel_engine_cs *engine;
519 if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
522 rq = to_request(fence);
526 rcu_read_lock(); /* RCU serialisation for set-wedged protection */
527 if (engine->schedule)
528 engine->schedule(rq, attr);
530 local_bh_enable(); /* kick the tasklets if queues were reprioritised */
533 static void fence_set_priority(struct dma_fence *fence,
534 const struct i915_sched_attr *attr)
536 /* Recurse once into a fence-array */
537 if (dma_fence_is_array(fence)) {
538 struct dma_fence_array *array = to_dma_fence_array(fence);
541 for (i = 0; i < array->num_fences; i++)
542 __fence_set_priority(array->fences[i], attr);
544 __fence_set_priority(fence, attr);
549 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
551 const struct i915_sched_attr *attr)
553 struct dma_fence *excl;
555 if (flags & I915_WAIT_ALL) {
556 struct dma_fence **shared;
557 unsigned int count, i;
560 ret = reservation_object_get_fences_rcu(obj->resv,
561 &excl, &count, &shared);
565 for (i = 0; i < count; i++) {
566 fence_set_priority(shared[i], attr);
567 dma_fence_put(shared[i]);
572 excl = reservation_object_get_excl_rcu(obj->resv);
576 fence_set_priority(excl, attr);
583 * Waits for rendering to the object to be completed
584 * @obj: i915 gem object
585 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
586 * @timeout: how long to wait
589 i915_gem_object_wait(struct drm_i915_gem_object *obj,
594 GEM_BUG_ON(timeout < 0);
596 timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout);
597 return timeout < 0 ? timeout : 0;
601 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
602 struct drm_i915_gem_pwrite *args,
603 struct drm_file *file)
605 void *vaddr = obj->phys_handle->vaddr + args->offset;
606 char __user *user_data = u64_to_user_ptr(args->data_ptr);
608 /* We manually control the domain here and pretend that it
609 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
611 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
612 if (copy_from_user(vaddr, user_data, args->size))
615 drm_clflush_virt_range(vaddr, args->size);
616 i915_gem_chipset_flush(to_i915(obj->base.dev));
618 intel_fb_obj_flush(obj, ORIGIN_CPU);
623 i915_gem_create(struct drm_file *file,
624 struct drm_i915_private *dev_priv,
628 struct drm_i915_gem_object *obj;
632 size = roundup(size, PAGE_SIZE);
636 /* Allocate the new object */
637 obj = i915_gem_object_create(dev_priv, size);
641 ret = drm_gem_handle_create(file, &obj->base, &handle);
642 /* drop reference from allocate - handle holds it now */
643 i915_gem_object_put(obj);
652 i915_gem_dumb_create(struct drm_file *file,
653 struct drm_device *dev,
654 struct drm_mode_create_dumb *args)
656 /* have to work out size/pitch and return them */
657 args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
658 args->size = args->pitch * args->height;
659 return i915_gem_create(file, to_i915(dev),
660 args->size, &args->handle);
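/*
 * Worked example: a 1920x1080 dumb buffer at 32 bpp has a cpp of 4, so
 * pitch = ALIGN(1920 * 4, 64) = 7680 bytes (already 64-byte aligned) and
 * size = 7680 * 1080 = 8294400 bytes, which i915_gem_create() then rounds
 * up to whole pages (exactly 2025 4KiB pages here).
 */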
663 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
665 return !(obj->cache_level == I915_CACHE_NONE ||
666 obj->cache_level == I915_CACHE_WT);
670 * Creates a new mm object and returns a handle to it.
671 * @dev: drm device pointer
672 * @data: ioctl data blob
673 * @file: drm file pointer
676 i915_gem_create_ioctl(struct drm_device *dev, void *data,
677 struct drm_file *file)
679 struct drm_i915_private *dev_priv = to_i915(dev);
680 struct drm_i915_gem_create *args = data;
682 i915_gem_flush_free_objects(dev_priv);
684 return i915_gem_create(file, dev_priv,
685 args->size, &args->handle);
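/*
 * Illustrative userspace usage (editor's sketch; assumes an open DRM fd,
 * error handling omitted):
 *
 *	struct drm_i915_gem_create create = { .size = 2 * 4096 };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *
 * create.handle can then be used with the pread/pwrite/set-domain/mmap
 * ioctls below.
 */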
688 static inline enum fb_op_origin
689 fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
691 return (domain == I915_GEM_DOMAIN_GTT ?
692 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
695 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
697 intel_wakeref_t wakeref;
700 * No actual flushing is required for the GTT write domain for reads
701 * from the GTT domain. Writes to it "immediately" go to main memory
702 * as far as we know, so there's no chipset flush. It also doesn't
703 * land in the GPU render cache.
705 * However, we do have to enforce the order so that all writes through
706 * the GTT land before any writes to the device, such as updates to
709 * We also have to wait a bit for the writes to land from the GTT.
710 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
711 * timing. This issue has only been observed when switching quickly
712 * between GTT writes and CPU reads from inside the kernel on recent hw,
713 * and it appears to only affect discrete GTT blocks (i.e. on LLC
714 * system agents we cannot reproduce this behaviour, until Cannonlake
720 if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
723 i915_gem_chipset_flush(dev_priv);
725 with_intel_runtime_pm(dev_priv, wakeref) {
726 spin_lock_irq(&dev_priv->uncore.lock);
728 POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
730 spin_unlock_irq(&dev_priv->uncore.lock);
735 flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
737 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
738 struct i915_vma *vma;
740 if (!(obj->write_domain & flush_domains))
743 switch (obj->write_domain) {
744 case I915_GEM_DOMAIN_GTT:
745 i915_gem_flush_ggtt_writes(dev_priv);
747 intel_fb_obj_flush(obj,
748 fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
750 for_each_ggtt_vma(vma, obj) {
754 i915_vma_unset_ggtt_write(vma);
758 case I915_GEM_DOMAIN_WC:
762 case I915_GEM_DOMAIN_CPU:
763 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
766 case I915_GEM_DOMAIN_RENDER:
767 if (gpu_write_needs_clflush(obj))
768 obj->cache_dirty = true;
772 obj->write_domain = 0;
776 * Pins the specified object's pages and synchronizes the object with
777 * GPU accesses. Sets needs_clflush to non-zero if the caller should
778 * flush the object from the CPU cache.
780 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
781 unsigned int *needs_clflush)
785 lockdep_assert_held(&obj->base.dev->struct_mutex);
788 if (!i915_gem_object_has_struct_page(obj))
791 ret = i915_gem_object_wait(obj,
792 I915_WAIT_INTERRUPTIBLE |
794 MAX_SCHEDULE_TIMEOUT);
798 ret = i915_gem_object_pin_pages(obj);
802 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
803 !static_cpu_has(X86_FEATURE_CLFLUSH)) {
804 ret = i915_gem_object_set_to_cpu_domain(obj, false);
811 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
	/* If we're not in the cpu read domain, set ourselves into the gtt
814 * read domain and manually flush cachelines (if required). This
815 * optimizes for the case when the gpu will dirty the data
816 * anyway again before the next pread happens.
818 if (!obj->cache_dirty &&
819 !(obj->read_domains & I915_GEM_DOMAIN_CPU))
820 *needs_clflush = CLFLUSH_BEFORE;
823 /* return with the pages pinned */
827 i915_gem_object_unpin_pages(obj);
831 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
832 unsigned int *needs_clflush)
836 lockdep_assert_held(&obj->base.dev->struct_mutex);
839 if (!i915_gem_object_has_struct_page(obj))
842 ret = i915_gem_object_wait(obj,
843 I915_WAIT_INTERRUPTIBLE |
846 MAX_SCHEDULE_TIMEOUT);
850 ret = i915_gem_object_pin_pages(obj);
854 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
855 !static_cpu_has(X86_FEATURE_CLFLUSH)) {
856 ret = i915_gem_object_set_to_cpu_domain(obj, true);
863 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
	/* If we're not in the cpu write domain, set ourselves into the
866 * gtt write domain and manually flush cachelines (as required).
867 * This optimizes for the case when the gpu will use the data
868 * right away and we therefore have to clflush anyway.
870 if (!obj->cache_dirty) {
871 *needs_clflush |= CLFLUSH_AFTER;
874 * Same trick applies to invalidate partially written
875 * cachelines read before writing.
877 if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
878 *needs_clflush |= CLFLUSH_BEFORE;
882 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
883 obj->mm.dirty = true;
884 /* return with the pages pinned */
888 i915_gem_object_unpin_pages(obj);
893 shmem_pread(struct page *page, int offset, int len, char __user *user_data,
902 drm_clflush_virt_range(vaddr + offset, len);
904 ret = __copy_to_user(user_data, vaddr + offset, len);
908 return ret ? -EFAULT : 0;
912 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
913 struct drm_i915_gem_pread *args)
915 char __user *user_data;
917 unsigned int needs_clflush;
918 unsigned int idx, offset;
921 ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
925 ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
926 mutex_unlock(&obj->base.dev->struct_mutex);
931 user_data = u64_to_user_ptr(args->data_ptr);
932 offset = offset_in_page(args->offset);
933 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
934 struct page *page = i915_gem_object_get_page(obj, idx);
935 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
937 ret = shmem_pread(page, offset, length, user_data,
947 i915_gem_obj_finish_shmem_access(obj);
952 gtt_user_read(struct io_mapping *mapping,
953 loff_t base, int offset,
954 char __user *user_data, int length)
957 unsigned long unwritten;
959 /* We can use the cpu mem copy function because this is X86. */
960 vaddr = io_mapping_map_atomic_wc(mapping, base);
961 unwritten = __copy_to_user_inatomic(user_data,
962 (void __force *)vaddr + offset,
964 io_mapping_unmap_atomic(vaddr);
966 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
967 unwritten = copy_to_user(user_data,
968 (void __force *)vaddr + offset,
970 io_mapping_unmap(vaddr);
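	/*
	 * Note the two-step copy above: the first attempt uses an atomic WC
	 * mapping and a non-faulting copy (neither may sleep); only if some
	 * bytes were left unwritten do we fall back to a regular WC mapping,
	 * where copy_to_user() is allowed to fault and sleep.
	 */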
976 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
977 const struct drm_i915_gem_pread *args)
979 struct drm_i915_private *i915 = to_i915(obj->base.dev);
980 struct i915_ggtt *ggtt = &i915->ggtt;
981 intel_wakeref_t wakeref;
982 struct drm_mm_node node;
983 struct i915_vma *vma;
984 void __user *user_data;
988 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
992 wakeref = intel_runtime_pm_get(i915);
993 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
998 node.start = i915_ggtt_offset(vma);
999 node.allocated = false;
1000 ret = i915_vma_put_fence(vma);
1002 i915_vma_unpin(vma);
1007 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1010 GEM_BUG_ON(!node.allocated);
1013 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1017 mutex_unlock(&i915->drm.struct_mutex);
1019 user_data = u64_to_user_ptr(args->data_ptr);
1020 remain = args->size;
1021 offset = args->offset;
1023 while (remain > 0) {
1024 /* Operation in this page
1026 * page_base = page offset within aperture
1027 * page_offset = offset within page
1028 * page_length = bytes to copy for this page
1030 u32 page_base = node.start;
1031 unsigned page_offset = offset_in_page(offset);
1032 unsigned page_length = PAGE_SIZE - page_offset;
1033 page_length = remain < page_length ? remain : page_length;
1034 if (node.allocated) {
1036 ggtt->vm.insert_page(&ggtt->vm,
1037 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1038 node.start, I915_CACHE_NONE, 0);
1041 page_base += offset & PAGE_MASK;
1044 if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
1045 user_data, page_length)) {
1050 remain -= page_length;
1051 user_data += page_length;
1052 offset += page_length;
1055 mutex_lock(&i915->drm.struct_mutex);
1057 if (node.allocated) {
1059 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
1060 remove_mappable_node(&node);
1062 i915_vma_unpin(vma);
1065 intel_runtime_pm_put(i915, wakeref);
1066 mutex_unlock(&i915->drm.struct_mutex);
1072 * Reads data from the object referenced by handle.
1073 * @dev: drm device pointer
1074 * @data: ioctl data blob
1075 * @file: drm file pointer
1077 * On error, the contents of *data are undefined.
1080 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1081 struct drm_file *file)
1083 struct drm_i915_gem_pread *args = data;
1084 struct drm_i915_gem_object *obj;
1087 if (args->size == 0)
1090 if (!access_ok(u64_to_user_ptr(args->data_ptr),
1094 obj = i915_gem_object_lookup(file, args->handle);
1098 /* Bounds check source. */
1099 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1104 trace_i915_gem_object_pread(obj, args->offset, args->size);
1106 ret = i915_gem_object_wait(obj,
1107 I915_WAIT_INTERRUPTIBLE,
1108 MAX_SCHEDULE_TIMEOUT);
1112 ret = i915_gem_object_pin_pages(obj);
1116 ret = i915_gem_shmem_pread(obj, args);
1117 if (ret == -EFAULT || ret == -ENODEV)
1118 ret = i915_gem_gtt_pread(obj, args);
1120 i915_gem_object_unpin_pages(obj);
1122 i915_gem_object_put(obj);
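/*
 * Illustrative userspace usage (editor's sketch; handle is a placeholder,
 * error handling omitted):
 *
 *	char buf[4096];
 *	struct drm_i915_gem_pread pread = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(buf),
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
 */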
1126 /* This is the fast write path which cannot handle
1127 * page faults in the source data
1131 ggtt_write(struct io_mapping *mapping,
1132 loff_t base, int offset,
1133 char __user *user_data, int length)
1135 void __iomem *vaddr;
1136 unsigned long unwritten;
1138 /* We can use the cpu mem copy function because this is X86. */
1139 vaddr = io_mapping_map_atomic_wc(mapping, base);
1140 unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
1142 io_mapping_unmap_atomic(vaddr);
1144 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
1145 unwritten = copy_from_user((void __force *)vaddr + offset,
1147 io_mapping_unmap(vaddr);
1154 * This is the fast pwrite path, where we copy the data directly from the
1155 * user into the GTT, uncached.
1156 * @obj: i915 GEM object
1157 * @args: pwrite arguments structure
1160 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
1161 const struct drm_i915_gem_pwrite *args)
1163 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1164 struct i915_ggtt *ggtt = &i915->ggtt;
1165 intel_wakeref_t wakeref;
1166 struct drm_mm_node node;
1167 struct i915_vma *vma;
1169 void __user *user_data;
1172 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1176 if (i915_gem_object_has_struct_page(obj)) {
1178 * Avoid waking the device up if we can fallback, as
1179 * waking/resuming is very slow (worst-case 10-100 ms
1180 * depending on PCI sleeps and our own resume time).
1181 * This easily dwarfs any performance advantage from
1182 * using the cache bypass of indirect GGTT access.
1184 wakeref = intel_runtime_pm_get_if_in_use(i915);
1190 /* No backing pages, no fallback, we must force GGTT access */
1191 wakeref = intel_runtime_pm_get(i915);
1194 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1199 node.start = i915_ggtt_offset(vma);
1200 node.allocated = false;
1201 ret = i915_vma_put_fence(vma);
1203 i915_vma_unpin(vma);
1208 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1211 GEM_BUG_ON(!node.allocated);
1214 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1218 mutex_unlock(&i915->drm.struct_mutex);
1220 intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1222 user_data = u64_to_user_ptr(args->data_ptr);
1223 offset = args->offset;
1224 remain = args->size;
1226 /* Operation in this page
1228 * page_base = page offset within aperture
1229 * page_offset = offset within page
1230 * page_length = bytes to copy for this page
1232 u32 page_base = node.start;
1233 unsigned int page_offset = offset_in_page(offset);
1234 unsigned int page_length = PAGE_SIZE - page_offset;
1235 page_length = remain < page_length ? remain : page_length;
1236 if (node.allocated) {
1237 wmb(); /* flush the write before we modify the GGTT */
1238 ggtt->vm.insert_page(&ggtt->vm,
1239 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1240 node.start, I915_CACHE_NONE, 0);
1241 wmb(); /* flush modifications to the GGTT (insert_page) */
1243 page_base += offset & PAGE_MASK;
1245 /* If we get a fault while copying data, then (presumably) our
1246 * source page isn't available. Return the error and we'll
1247 * retry in the slow path.
1248 * If the object is non-shmem backed, we retry again with the
1249 * path that handles page fault.
1251 if (ggtt_write(&ggtt->iomap, page_base, page_offset,
1252 user_data, page_length)) {
1257 remain -= page_length;
1258 user_data += page_length;
1259 offset += page_length;
1261 intel_fb_obj_flush(obj, ORIGIN_CPU);
1263 mutex_lock(&i915->drm.struct_mutex);
1265 if (node.allocated) {
1267 ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
1268 remove_mappable_node(&node);
1270 i915_vma_unpin(vma);
1273 intel_runtime_pm_put(i915, wakeref);
1275 mutex_unlock(&i915->drm.struct_mutex);
1279 /* Per-page copy function for the shmem pwrite fastpath.
1280 * Flushes invalid cachelines before writing to the target if
1281 * needs_clflush_before is set and flushes out any written cachelines after
1282 * writing if needs_clflush is set.
1285 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
1286 bool needs_clflush_before,
1287 bool needs_clflush_after)
1294 if (needs_clflush_before)
1295 drm_clflush_virt_range(vaddr + offset, len);
1297 ret = __copy_from_user(vaddr + offset, user_data, len);
1298 if (!ret && needs_clflush_after)
1299 drm_clflush_virt_range(vaddr + offset, len);
1303 return ret ? -EFAULT : 0;
1307 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1308 const struct drm_i915_gem_pwrite *args)
1310 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1311 void __user *user_data;
1313 unsigned int partial_cacheline_write;
1314 unsigned int needs_clflush;
1315 unsigned int offset, idx;
1318 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1322 ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1323 mutex_unlock(&i915->drm.struct_mutex);
	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire page.
1331 partial_cacheline_write = 0;
1332 if (needs_clflush & CLFLUSH_BEFORE)
1333 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
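	/*
	 * With 64-byte cachelines this mask is 63 (0x3f), so
	 * "(offset | length) & 63" below is non-zero whenever the copied
	 * range does not start and end on a cacheline boundary; only those
	 * partial writes need the clflush before the copy.
	 */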
1335 user_data = u64_to_user_ptr(args->data_ptr);
1336 remain = args->size;
1337 offset = offset_in_page(args->offset);
1338 for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1339 struct page *page = i915_gem_object_get_page(obj, idx);
1340 unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);
1342 ret = shmem_pwrite(page, offset, length, user_data,
1343 (offset | length) & partial_cacheline_write,
1344 needs_clflush & CLFLUSH_AFTER);
1349 user_data += length;
1353 intel_fb_obj_flush(obj, ORIGIN_CPU);
1354 i915_gem_obj_finish_shmem_access(obj);
1359 * Writes data to the object referenced by handle.
1361 * @data: ioctl data blob
1364 * On error, the contents of the buffer that were to be modified are undefined.
1367 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1368 struct drm_file *file)
1370 struct drm_i915_gem_pwrite *args = data;
1371 struct drm_i915_gem_object *obj;
1374 if (args->size == 0)
1377 if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
1380 obj = i915_gem_object_lookup(file, args->handle);
1384 /* Bounds check destination. */
1385 if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1390 /* Writes not allowed into this read-only object */
1391 if (i915_gem_object_is_readonly(obj)) {
1396 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1399 if (obj->ops->pwrite)
1400 ret = obj->ops->pwrite(obj, args);
1404 ret = i915_gem_object_wait(obj,
1405 I915_WAIT_INTERRUPTIBLE |
1407 MAX_SCHEDULE_TIMEOUT);
1411 ret = i915_gem_object_pin_pages(obj);
1416 /* We can only do the GTT pwrite on untiled buffers, as otherwise
1417 * it would end up going through the fenced access, and we'll get
1418 * different detiling behavior between reading and writing.
1419 * pread/pwrite currently are reading and writing from the CPU
1420 * perspective, requiring manual detiling by the client.
1422 if (!i915_gem_object_has_struct_page(obj) ||
1423 cpu_write_needs_clflush(obj))
1424 /* Note that the gtt paths might fail with non-page-backed user
1425 * pointers (e.g. gtt mappings when moving data between
1426 * textures). Fallback to the shmem path in that case.
1428 ret = i915_gem_gtt_pwrite_fast(obj, args);
1430 if (ret == -EFAULT || ret == -ENOSPC) {
1431 if (obj->phys_handle)
1432 ret = i915_gem_phys_pwrite(obj, args, file);
1434 ret = i915_gem_shmem_pwrite(obj, args);
1437 i915_gem_object_unpin_pages(obj);
1439 i915_gem_object_put(obj);
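/*
 * Illustrative userspace usage (editor's sketch; handle, data and data_len
 * are placeholders, error handling omitted):
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = data_len,
 *		.data_ptr = (uintptr_t)data,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */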
1443 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
1445 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1446 struct list_head *list;
1447 struct i915_vma *vma;
1449 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
1451 mutex_lock(&i915->ggtt.vm.mutex);
1452 for_each_ggtt_vma(vma, obj) {
1453 if (!drm_mm_node_allocated(&vma->node))
1456 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1458 mutex_unlock(&i915->ggtt.vm.mutex);
1460 spin_lock(&i915->mm.obj_lock);
1461 list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1462 list_move_tail(&obj->mm.link, list);
1463 spin_unlock(&i915->mm.obj_lock);
1467 * Called when user space prepares to use an object with the CPU, either
1468 * through the mmap ioctl's mapping or a GTT mapping.
1470 * @data: ioctl data blob
1474 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1475 struct drm_file *file)
1477 struct drm_i915_gem_set_domain *args = data;
1478 struct drm_i915_gem_object *obj;
1479 u32 read_domains = args->read_domains;
1480 u32 write_domain = args->write_domain;
1483 /* Only handle setting domains to types used by the CPU. */
1484 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1488 * Having something in the write domain implies it's in the read
1489 * domain, and only that read domain. Enforce that in the request.
1491 if (write_domain && read_domains != write_domain)
1497 obj = i915_gem_object_lookup(file, args->handle);
1502 * Already in the desired write domain? Nothing for us to do!
1504 * We apply a little bit of cunning here to catch a broader set of
1505 * no-ops. If obj->write_domain is set, we must be in the same
1506 * obj->read_domains, and only that domain. Therefore, if that
1507 * obj->write_domain matches the request read_domains, we are
1508 * already in the same read/write domain and can skip the operation,
1509 * without having to further check the requested write_domain.
1511 if (READ_ONCE(obj->write_domain) == read_domains) {
1517 * Try to flush the object off the GPU without holding the lock.
1518 * We will repeat the flush holding the lock in the normal manner
1519 * to catch cases where we are gazumped.
1521 err = i915_gem_object_wait(obj,
1522 I915_WAIT_INTERRUPTIBLE |
1523 I915_WAIT_PRIORITY |
1524 (write_domain ? I915_WAIT_ALL : 0),
1525 MAX_SCHEDULE_TIMEOUT);
1530 * Proxy objects do not control access to the backing storage, ergo
1531 * they cannot be used as a means to manipulate the cache domain
1532 * tracking for that backing storage. The proxy object is always
1533 * considered to be outside of any cache domain.
1535 if (i915_gem_object_is_proxy(obj)) {
1541 * Flush and acquire obj->pages so that we are coherent through
1542 * direct access in memory with previous cached writes through
1543 * shmemfs and that our cache domain tracking remains valid.
1544 * For example, if the obj->filp was moved to swap without us
1545 * being notified and releasing the pages, we would mistakenly
1546 * continue to assume that the obj remained out of the CPU cached
1549 err = i915_gem_object_pin_pages(obj);
1553 err = i915_mutex_lock_interruptible(dev);
1557 if (read_domains & I915_GEM_DOMAIN_WC)
1558 err = i915_gem_object_set_to_wc_domain(obj, write_domain);
1559 else if (read_domains & I915_GEM_DOMAIN_GTT)
1560 err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
1562 err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
1564 /* And bump the LRU for this access */
1565 i915_gem_object_bump_inactive_ggtt(obj);
1567 mutex_unlock(&dev->struct_mutex);
1569 if (write_domain != 0)
1570 intel_fb_obj_invalidate(obj,
1571 fb_write_origin(obj, write_domain));
1574 i915_gem_object_unpin_pages(obj);
1576 i915_gem_object_put(obj);
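/*
 * Illustrative userspace usage (editor's sketch; handle is a placeholder,
 * error handling omitted) to make an object coherent for CPU access before
 * touching a CPU mmap:
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */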
1581 * Called when user space has done writes to this buffer
1583 * @data: ioctl data blob
1587 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1588 struct drm_file *file)
1590 struct drm_i915_gem_sw_finish *args = data;
1591 struct drm_i915_gem_object *obj;
1593 obj = i915_gem_object_lookup(file, args->handle);
1598 * Proxy objects are barred from CPU access, so there is no
1599 * need to ban sw_finish as it is a nop.
1602 /* Pinned buffers may be scanout, so flush the cache */
1603 i915_gem_object_flush_if_display(obj);
1604 i915_gem_object_put(obj);
1610 __vma_matches(struct vm_area_struct *vma, struct file *filp,
1611 unsigned long addr, unsigned long size)
1613 if (vma->vm_file != filp)
1616 return vma->vm_start == addr &&
1617 (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
1621 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1624 * @data: ioctl data blob
1627 * While the mapping holds a reference on the contents of the object, it doesn't
1628 * imply a ref on the object itself.
 * DRM driver writers who look at this function as an example for how to do GEM
1633 * mmap support, please don't implement mmap support like here. The modern way
1634 * to implement DRM mmap support is with an mmap offset ioctl (like
1635 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1636 * That way debug tooling like valgrind will understand what's going on, hiding
1637 * the mmap call in a driver private ioctl will break that. The i915 driver only
1638 * does cpu mmaps this way because we didn't know better.
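/*
 * Illustrative userspace usage (editor's sketch; handle and obj_size are
 * placeholders, error handling omitted):
 *
 *	struct drm_i915_gem_mmap arg = {
 *		.handle = handle,
 *		.size = obj_size,
 *		.flags = I915_MMAP_WC,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg);
 *	void *ptr = (void *)(uintptr_t)arg.addr_ptr;
 */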
1641 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1642 struct drm_file *file)
1644 struct drm_i915_gem_mmap *args = data;
1645 struct drm_i915_gem_object *obj;
1648 if (args->flags & ~(I915_MMAP_WC))
1651 if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1654 obj = i915_gem_object_lookup(file, args->handle);
1658 /* prime objects have no backing filp to GEM mmap
1661 if (!obj->base.filp) {
1666 if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
1671 addr = vm_mmap(obj->base.filp, 0, args->size,
1672 PROT_READ | PROT_WRITE, MAP_SHARED,
1674 if (IS_ERR_VALUE(addr))
1677 if (args->flags & I915_MMAP_WC) {
1678 struct mm_struct *mm = current->mm;
1679 struct vm_area_struct *vma;
1681 if (down_write_killable(&mm->mmap_sem)) {
1685 vma = find_vma(mm, addr);
1686 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
1688 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1691 up_write(&mm->mmap_sem);
1692 if (IS_ERR_VALUE(addr))
1695 /* This may race, but that's ok, it only gets set */
1696 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1698 i915_gem_object_put(obj);
1700 args->addr_ptr = (u64)addr;
1704 i915_gem_object_put(obj);
1708 static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
1710 return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
1714 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1716 * A history of the GTT mmap interface:
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
1720 * mappable space left by the pinned display objects. A classic problem
1721 * we called the page-fault-of-doom where we would ping-pong between
1722 * two objects that could not fit inside the GTT and so the memcpy
1723 * would page one object in at the expense of the other between every
1726 * 1 - Objects can be any size, and have any compatible fencing (X Y, or none
1727 * as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1728 * object is too large for the available space (or simply too large
1729 * for the mappable aperture!), a view is created instead and faulted
1730 * into userspace. (This view is aligned and sized appropriately for
1733 * 2 - Recognise WC as a separate cache domain so that we can flush the
1734 * delayed writes via GTT before performing direct access via WC.
1736 * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
1737 * pagefault; swapin remains transparent.
1741 * * snoopable objects cannot be accessed via the GTT. It can cause machine
1742 * hangs on some architectures, corruption on others. An attempt to service
1743 * a GTT page fault from a snoopable object will generate a SIGBUS.
 * * the object must be able to fit into RAM (physical memory, though not
 *   limited to the mappable aperture).
1751 * * a new GTT page fault will synchronize rendering from the GPU and flush
1752 * all data to system memory. Subsequent access will not be synchronized.
1754 * * all mappings are revoked on runtime device suspend.
1756 * * there are only 8, 16 or 32 fence registers to share between all users
 *   (older machines require a fence register for display and blitter access
1758 * as well). Contention of the fence registers will cause the previous users
1759 * to be unmapped and any new access will generate new page faults.
1761 * * running out of memory while servicing a fault may generate a SIGBUS,
1762 * rather than the expected SIGSEGV.
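/*
 * Illustrative userspace query of the version described above (editor's
 * sketch; error handling omitted):
 *
 *	int value = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &value,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */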
1764 int i915_gem_mmap_gtt_version(void)
1769 static inline struct i915_ggtt_view
1770 compute_partial_view(const struct drm_i915_gem_object *obj,
1771 pgoff_t page_offset,
1774 struct i915_ggtt_view view;
1776 if (i915_gem_object_is_tiled(obj))
1777 chunk = roundup(chunk, tile_row_pages(obj));
1779 view.type = I915_GGTT_VIEW_PARTIAL;
1780 view.partial.offset = rounddown(page_offset, chunk);
1782 min_t(unsigned int, chunk,
1783 (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
1785 /* If the partial covers the entire object, just create a normal VMA. */
1786 if (chunk >= obj->base.size >> PAGE_SHIFT)
1787 view.type = I915_GGTT_VIEW_NORMAL;
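	/*
	 * Worked example: with 4KiB pages, i915_gem_fault() passes a chunk of
	 * MIN_CHUNK_PAGES = SZ_1M >> PAGE_SHIFT = 256 pages. A fault at page
	 * 1000 of an untiled 16MiB (4096-page) object therefore yields a
	 * partial view covering pages 768-1023, i.e. the 1MiB window that
	 * contains the faulting page.
	 */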
1793 * i915_gem_fault - fault a page into the GTT
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1797 * from userspace. The fault handler takes care of binding the object to
1798 * the GTT (if needed), allocating and programming a fence register (again,
1799 * only if needed based on whether the old reg is still valid or the object
1800 * is tiled) and inserting a new PTE into the faulting process.
1802 * Note that the faulting process may involve evicting existing objects
1803 * from the GTT and/or fence registers to make room. So performance may
1804 * suffer if the GTT working set is large or there are few fence registers
1807 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1808 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
1810 vm_fault_t i915_gem_fault(struct vm_fault *vmf)
1812 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
1813 struct vm_area_struct *area = vmf->vma;
1814 struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
1815 struct drm_device *dev = obj->base.dev;
1816 struct drm_i915_private *dev_priv = to_i915(dev);
1817 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1818 bool write = area->vm_flags & VM_WRITE;
1819 intel_wakeref_t wakeref;
1820 struct i915_vma *vma;
1821 pgoff_t page_offset;
1825 /* Sanity check that we allow writing into this object */
1826 if (i915_gem_object_is_readonly(obj) && write)
1827 return VM_FAULT_SIGBUS;
1829 /* We don't use vmf->pgoff since that has the fake offset */
1830 page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
1832 trace_i915_gem_object_fault(obj, page_offset, true, write);
1834 ret = i915_gem_object_pin_pages(obj);
1838 wakeref = intel_runtime_pm_get(dev_priv);
1840 srcu = i915_reset_trylock(dev_priv);
1846 ret = i915_mutex_lock_interruptible(dev);
1850 /* Access to snoopable pages through the GTT is incoherent. */
1851 if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
1856 /* Now pin it into the GTT as needed */
1857 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1862 /* Use a partial view if it is bigger than available space */
1863 struct i915_ggtt_view view =
1864 compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
1867 flags = PIN_MAPPABLE;
1868 if (view.type == I915_GGTT_VIEW_NORMAL)
1869 flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
1872 * Userspace is now writing through an untracked VMA, abandon
1873 * all hope that the hardware is able to track future writes.
1875 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1877 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
1878 if (IS_ERR(vma) && !view.type) {
1879 flags = PIN_MAPPABLE;
1880 view.type = I915_GGTT_VIEW_PARTIAL;
1881 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
1889 ret = i915_vma_pin_fence(vma);
1893 /* Finally, remap it using the new GTT offset */
1894 ret = remap_io_mapping(area,
1895 area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
1896 (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
1897 min_t(u64, vma->size, area->vm_end - area->vm_start),
1902 /* Mark as being mmapped into userspace for later revocation */
1903 assert_rpm_wakelock_held(dev_priv);
1904 if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
1905 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
1906 GEM_BUG_ON(!obj->userfault_count);
1908 i915_vma_set_ggtt_write(vma);
1911 i915_vma_unpin_fence(vma);
1913 __i915_vma_unpin(vma);
1915 mutex_unlock(&dev->struct_mutex);
1917 i915_reset_unlock(dev_priv, srcu);
1919 intel_runtime_pm_put(dev_priv, wakeref);
1920 i915_gem_object_unpin_pages(obj);
1925 * We eat errors when the gpu is terminally wedged to avoid
1926 * userspace unduly crashing (gl has no provisions for mmaps to
1927 * fail). But any other -EIO isn't ours (e.g. swap in failure)
1928 * and so needs to be reported.
1930 if (!i915_terminally_wedged(dev_priv))
1931 return VM_FAULT_SIGBUS;
1932 /* else: fall through */
1935 * EAGAIN means the gpu is hung and we'll wait for the error
1936 * handler to reset everything when re-faulting in
1937 * i915_mutex_lock_interruptible.
1944 * EBUSY is ok: this just means that another thread
1945 * already did the job.
1947 return VM_FAULT_NOPAGE;
1949 return VM_FAULT_OOM;
1952 return VM_FAULT_SIGBUS;
1954 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1955 return VM_FAULT_SIGBUS;
1959 static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
1961 struct i915_vma *vma;
1963 GEM_BUG_ON(!obj->userfault_count);
1965 obj->userfault_count = 0;
1966 list_del(&obj->userfault_link);
1967 drm_vma_node_unmap(&obj->base.vma_node,
1968 obj->base.dev->anon_inode->i_mapping);
1970 for_each_ggtt_vma(vma, obj)
1971 i915_vma_unset_userfault(vma);
1975 * i915_gem_release_mmap - remove physical page mappings
1976 * @obj: obj in question
1978 * Preserve the reservation of the mmapping with the DRM core code, but
1979 * relinquish ownership of the pages back to the system.
1981 * It is vital that we remove the page mapping if we have mapped a tiled
1982 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
1985 * mapping will then trigger a page fault on the next user access, allowing
1986 * fixup by i915_gem_fault().
1989 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1991 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1992 intel_wakeref_t wakeref;
1994 /* Serialisation between user GTT access and our code depends upon
1995 * revoking the CPU's PTE whilst the mutex is held. The next user
1996 * pagefault then has to wait until we release the mutex.
1998 * Note that RPM complicates somewhat by adding an additional
1999 * requirement that operations to the GGTT be made holding the RPM
2002 lockdep_assert_held(&i915->drm.struct_mutex);
2003 wakeref = intel_runtime_pm_get(i915);
2005 if (!obj->userfault_count)
2008 __i915_gem_object_release_mmap(obj);
	/* Ensure that the CPU's PTEs are revoked and there are no outstanding
2011 * memory transactions from userspace before we return. The TLB
2012 * flushing implied above by changing the PTE above *should* be
2013 * sufficient, an extra barrier here just provides us with a bit
2014 * of paranoid documentation about our requirement to serialise
2015 * memory writes before touching registers / GSM.
2020 intel_runtime_pm_put(i915, wakeref);
2023 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
2025 struct drm_i915_gem_object *obj, *on;
2029 * Only called during RPM suspend. All users of the userfault_list
2030 * must be holding an RPM wakeref to ensure that this can not
2031 * run concurrently with themselves (and use the struct_mutex for
2032 * protection between themselves).
2035 list_for_each_entry_safe(obj, on,
2036 &dev_priv->mm.userfault_list, userfault_link)
2037 __i915_gem_object_release_mmap(obj);
2039 /* The fence will be lost when the device powers down. If any were
2040 * in use by hardware (i.e. they are pinned), we should not be powering
2041 * down! All other fences will be reacquired by the user upon waking.
2043 for (i = 0; i < dev_priv->num_fence_regs; i++) {
2044 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2046 /* Ideally we want to assert that the fence register is not
2047 * live at this point (i.e. that no piece of code will be
2048 * trying to write through fence + GTT, as that both violates
2049 * our tracking of activity and associated locking/barriers,
2050 * but also is illegal given that the hw is powered down).
2052 * Previously we used reg->pin_count as a "liveness" indicator.
2053 * That is not sufficient, and we need a more fine-grained
2054 * tool if we want to have a sanity check here.
2060 GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
2065 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2067 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2070 err = drm_gem_create_mmap_offset(&obj->base);
2074 /* Attempt to reap some mmap space from dead objects */
2076 err = i915_gem_wait_for_idle(dev_priv,
2077 I915_WAIT_INTERRUPTIBLE,
2078 MAX_SCHEDULE_TIMEOUT);
2082 i915_gem_drain_freed_objects(dev_priv);
2083 err = drm_gem_create_mmap_offset(&obj->base);
2087 } while (flush_delayed_work(&dev_priv->gt.retire_work));
2092 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2094 drm_gem_free_mmap_offset(&obj->base);
2098 i915_gem_mmap_gtt(struct drm_file *file,
2099 struct drm_device *dev,
2103 struct drm_i915_gem_object *obj;
2106 obj = i915_gem_object_lookup(file, handle);
2110 ret = i915_gem_object_create_mmap_offset(obj);
2112 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2114 i915_gem_object_put(obj);
2119 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2121 * @data: GTT mapping ioctl data
2122 * @file: GEM object info
2124 * Simply returns the fake offset to userspace so it can mmap it.
2125 * The mmap call will end up in drm_gem_mmap(), which will set things
2126 * up so we can get faults in the handler above.
2128 * The fault handler will take care of binding the object into the GTT
2129 * (since it may have been evicted to make room for something), allocating
2130 * a fence register, and mapping the appropriate aperture address into
2134 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2135 struct drm_file *file)
2137 struct drm_i915_gem_mmap_gtt *args = data;
2139 return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
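/*
 * Illustrative userspace flow (editor's sketch; handle and obj_size are
 * placeholders, error handling omitted):
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * The returned offset is only a token for mmap(2); the actual GTT binding
 * and fence setup happen lazily in i915_gem_fault() on first access.
 */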
2142 /* Immediately discard the backing storage */
2144 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2146 i915_gem_object_free_mmap_offset(obj);
2148 if (obj->base.filp == NULL)
2151 /* Our goal here is to return as much of the memory as
2152 * is possible back to the system as we are called from OOM.
2153 * To do this we must instruct the shmfs to drop all of its
2154 * backing pages, *now*.
2156 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2157 obj->mm.madv = __I915_MADV_PURGED;
2158 obj->mm.pages = ERR_PTR(-EFAULT);
2161 /* Try to discard unwanted pages */
2162 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2164 struct address_space *mapping;
2166 lockdep_assert_held(&obj->mm.lock);
2167 GEM_BUG_ON(i915_gem_object_has_pages(obj));
2169 switch (obj->mm.madv) {
2170 case I915_MADV_DONTNEED:
2171 i915_gem_object_truncate(obj);
2172 case __I915_MADV_PURGED:
2176 if (obj->base.filp == NULL)
	mapping = obj->base.filp->f_mapping;
2180 invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2184 * Move pages to appropriate lru and release the pagevec, decrementing the
2185 * ref count of those pages.
2187 static void check_release_pagevec(struct pagevec *pvec)
2189 check_move_unevictable_pages(pvec);
2190 __pagevec_release(pvec);
2195 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2196 struct sg_table *pages)
2198 struct sgt_iter sgt_iter;
2199 struct pagevec pvec;
2202 __i915_gem_object_release_shmem(obj, pages, true);
2204 i915_gem_gtt_finish_pages(obj, pages);
2206 if (i915_gem_object_needs_bit17_swizzle(obj))
2207 i915_gem_object_save_bit_17_swizzle(obj, pages);
2209 mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
2211 pagevec_init(&pvec);
2212 for_each_sgt_page(page, sgt_iter, pages) {
2214 set_page_dirty(page);
2216 if (obj->mm.madv == I915_MADV_WILLNEED)
2217 mark_page_accessed(page);
2219 if (!pagevec_add(&pvec, page))
2220 check_release_pagevec(&pvec);
2222 if (pagevec_count(&pvec))
2223 check_release_pagevec(&pvec);
2224 obj->mm.dirty = false;
2226 sg_free_table(pages);
2230 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
2232 struct radix_tree_iter iter;
2236 radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
2237 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
2241 static struct sg_table *
2242 __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
2244 struct drm_i915_private *i915 = to_i915(obj->base.dev);
2245 struct sg_table *pages;
2247 pages = fetch_and_zero(&obj->mm.pages);
2248 if (IS_ERR_OR_NULL(pages))
2251 spin_lock(&i915->mm.obj_lock);
2252 list_del(&obj->mm.link);
2253 spin_unlock(&i915->mm.obj_lock);
2255 if (obj->mm.mapping) {
2258 ptr = page_mask_bits(obj->mm.mapping);
2259 if (is_vmalloc_addr(ptr))
2262 kunmap(kmap_to_page(ptr));
2264 obj->mm.mapping = NULL;
2267 __i915_gem_object_reset_page_iter(obj);
2268 obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
2273 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2274 enum i915_mm_subclass subclass)
2276 struct sg_table *pages;
2279 if (i915_gem_object_has_pinned_pages(obj))
2282 GEM_BUG_ON(obj->bind_count);
2284 /* May be called by shrinker from within get_pages() (on another bo) */
2285 mutex_lock_nested(&obj->mm.lock, subclass);
2286 if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
2292 * ->put_pages might need to allocate memory for the bit17 swizzle
2293 * array, hence protect them from being reaped by removing them from gtt
2296 pages = __i915_gem_object_unset_pages(obj);
2299 * XXX Temporary hijinx to avoid updating all backends to handle
2300 * NULL pages. In the future, when we have more asynchronous
2301 * get_pages backends we should be better able to handle the
2302 * cancellation of the async task in a more uniform manner.
2304 if (!pages && !i915_gem_object_needs_async_cancel(obj))
2305 pages = ERR_PTR(-EINVAL);
2308 obj->ops->put_pages(obj, pages);
2312 mutex_unlock(&obj->mm.lock);
2317 bool i915_sg_trim(struct sg_table *orig_st)
2319 struct sg_table new_st;
2320 struct scatterlist *sg, *new_sg;
2323 if (orig_st->nents == orig_st->orig_nents)
2326 if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
2329 new_sg = new_st.sgl;
2330 for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
2331 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
2332 sg_dma_address(new_sg) = sg_dma_address(sg);
2333 sg_dma_len(new_sg) = sg_dma_len(sg);
2335 new_sg = sg_next(new_sg);
2337 GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
2339 sg_free_table(orig_st);
2345 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2347 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2348 const unsigned long page_count = obj->base.size / PAGE_SIZE;
2350 struct address_space *mapping;
2351 struct sg_table *st;
2352 struct scatterlist *sg;
2353 struct sgt_iter sgt_iter;
2355 unsigned long last_pfn = 0; /* suppress gcc warning */
2356 unsigned int max_segment = i915_sg_segment_size();
2357 unsigned int sg_page_sizes;
2358 struct pagevec pvec;
2363 * Assert that the object is not currently in any GPU domain. As it
2364 * wasn't in the GTT, there shouldn't be any way it could have been in
2367 GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2368 GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2371 * If there's no chance of allocating enough pages for the whole
2372 * object, bail early.
2374 if (page_count > totalram_pages())
2377 st = kmalloc(sizeof(*st), GFP_KERNEL);
2382 if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2388 * Get the list of pages out of our struct file. They'll be pinned
2389 * at this point until we release them.
2391 * Fail silently without starting the shrinker
2393 mapping = obj->base.filp->f_mapping;
2394 mapping_set_unevictable(mapping);
2395 noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
2396 noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
2401 for (i = 0; i < page_count; i++) {
2402 const unsigned int shrink[] = {
2403 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
2406 gfp_t gfp = noreclaim;
2410 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2415 ret = PTR_ERR(page);
2419 i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
2422 * We've tried hard to allocate the memory by reaping
2423 * our own buffer, now let the real VM do its job and
2424 * go down in flames if truly OOM.
2426 * However, since graphics tend to be disposable,
2427 * defer the oom here by reporting the ENOMEM back to userspace.
2431 /* reclaim and warn, but no oom */
2432 gfp = mapping_gfp_mask(mapping);
2435 * Our bo are always dirty and so we require
2436 * kswapd to reclaim our pages (direct reclaim
2437 * does not effectively begin pageout of our
2438 * buffers on its own). However, direct reclaim
2439 * only waits for kswapd when under allocation
2440 * congestion. So as a result __GFP_RECLAIM is
2441 * unreliable and fails to actually reclaim our
2442 * dirty pages -- unless you try over and over
2443 * again with !__GFP_NORETRY. However, we still
2444 * want to fail this allocation rather than
2445 * trigger the out-of-memory killer and for
2446 * this we want __GFP_RETRY_MAYFAIL.
2448 gfp |= __GFP_RETRY_MAYFAIL;
2453 sg->length >= max_segment ||
2454 page_to_pfn(page) != last_pfn + 1) {
2456 sg_page_sizes |= sg->length;
2460 sg_set_page(sg, page, PAGE_SIZE, 0);
2462 sg->length += PAGE_SIZE;
2464 last_pfn = page_to_pfn(page);
2466 /* Check that the i965g/gm workaround works. */
2467 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2469 if (sg) { /* loop terminated early; short sg table */
2470 sg_page_sizes |= sg->length;
2474 /* Trim unused sg entries to avoid wasting memory. */
2477 ret = i915_gem_gtt_prepare_pages(obj, st);
2480 * DMA remapping failed? One possible cause is that
2481 * it could not reserve enough large entries, asking
2482 * for PAGE_SIZE chunks instead may be helpful.
2484 if (max_segment > PAGE_SIZE) {
2485 for_each_sgt_page(page, sgt_iter, st)
2489 max_segment = PAGE_SIZE;
2492 dev_warn(&dev_priv->drm.pdev->dev,
2493 "Failed to DMA remap %lu pages\n",
2499 if (i915_gem_object_needs_bit17_swizzle(obj))
2500 i915_gem_object_do_bit_17_swizzle(obj, st);
2502 __i915_gem_object_set_pages(obj, st, sg_page_sizes);
2509 mapping_clear_unevictable(mapping);
2510 pagevec_init(&pvec);
2511 for_each_sgt_page(page, sgt_iter, st) {
2512 if (!pagevec_add(&pvec, page))
2513 check_release_pagevec(&pvec);
2515 if (pagevec_count(&pvec))
2516 check_release_pagevec(&pvec);
2521 * shmemfs first checks if there is enough memory to allocate the page
2522 * and reports ENOSPC should there be insufficient, along with the usual
2523 * ENOMEM for a genuine allocation failure.
2525 * We use ENOSPC in our driver to mean that we have run out of aperture
2526 * space and so want to translate the error from shmemfs back to our
2527 * usual understanding of ENOMEM.
2535 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
2536 struct sg_table *pages,
2537 unsigned int sg_page_sizes)
2539 struct drm_i915_private *i915 = to_i915(obj->base.dev);
2540 unsigned long supported = INTEL_INFO(i915)->page_sizes;
2543 lockdep_assert_held(&obj->mm.lock);
2545 /* Make the pages coherent with the GPU (flushing any swapin). */
2546 if (obj->cache_dirty) {
2547 obj->write_domain = 0;
2548 if (i915_gem_object_has_struct_page(obj))
2549 drm_clflush_sg(pages);
2550 obj->cache_dirty = false;
2553 obj->mm.get_page.sg_pos = pages->sgl;
2554 obj->mm.get_page.sg_idx = 0;
2556 obj->mm.pages = pages;
2558 if (i915_gem_object_is_tiled(obj) &&
2559 i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
2560 GEM_BUG_ON(obj->mm.quirked);
2561 __i915_gem_object_pin_pages(obj);
2562 obj->mm.quirked = true;
2565 GEM_BUG_ON(!sg_page_sizes);
2566 obj->mm.page_sizes.phys = sg_page_sizes;
2569 * Calculate the supported page-sizes which fit into the given
2570 * sg_page_sizes. This will give us the page-sizes which we may be able
2571 * to use opportunistically when later inserting into the GTT. For
2572 * example if phys=2G, then in theory we should be able to use 1G, 2M,
2573 * 64K or 4K pages, although in practice this will depend on a number of other factors.
2576 obj->mm.page_sizes.sg = 0;
2577 for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
2578 if (obj->mm.page_sizes.phys & ~0u << i)
2579 obj->mm.page_sizes.sg |= BIT(i);
2581 GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
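/*
 * Worked example (illustrative): with supported = 4K | 64K | 2M and
 * page_sizes.phys = 64K | 4K, the loop above sets page_sizes.sg to
 * 64K | 4K only; the 2M bit stays clear because no physical chunk of at
 * least 2M exists, i.e. (phys & (~0u << 21)) == 0.
 */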
2583 spin_lock(&i915->mm.obj_lock);
2584 list_add(&obj->mm.link, &i915->mm.unbound_list);
2585 spin_unlock(&i915->mm.obj_lock);
2588 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2592 if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
2593 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2597 err = obj->ops->get_pages(obj);
2598 GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
2603 /* Ensure that the associated pages are gathered from the backing storage
2604 * and pinned into our object. i915_gem_object_pin_pages() may be called
2605 * multiple times before they are released by a single call to
2606 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
2607 * either as a result of memory pressure (reaping pages under the shrinker)
2608 * or as the object is itself released.
2610 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2614 err = mutex_lock_interruptible(&obj->mm.lock);
2618 if (unlikely(!i915_gem_object_has_pages(obj))) {
2619 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2621 err = ____i915_gem_object_get_pages(obj);
2625 smp_mb__before_atomic();
2627 atomic_inc(&obj->mm.pages_pin_count);
2630 mutex_unlock(&obj->mm.lock);
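/*
 * Illustrative usage (sketch only, using the pin/unpin helpers declared
 * alongside this file): callers bracket CPU access to the backing store
 * with a pin/unpin pair; the pin count nests, so only the final unpin
 * makes the pages eligible for the shrinker again.
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *	... access obj->mm.pages ...
 *	i915_gem_object_unpin_pages(obj);
 */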
2634 /* The 'mapping' part of i915_gem_object_pin_map() below */
2635 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2636 enum i915_map_type type)
2638 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2639 struct sg_table *sgt = obj->mm.pages;
2640 struct sgt_iter sgt_iter;
2642 struct page *stack_pages[32];
2643 struct page **pages = stack_pages;
2644 unsigned long i = 0;
2648 /* A single page can always be kmapped */
2649 if (n_pages == 1 && type == I915_MAP_WB)
2650 return kmap(sg_page(sgt->sgl));
2652 if (n_pages > ARRAY_SIZE(stack_pages)) {
2653 /* Too big for stack -- allocate temporary array instead */
2654 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
2659 for_each_sgt_page(page, sgt_iter, sgt)
2662 /* Check that we have the expected number of pages */
2663 GEM_BUG_ON(i != n_pages);
2668 /* fallthrough to use PAGE_KERNEL anyway */
2670 pgprot = PAGE_KERNEL;
2673 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2676 addr = vmap(pages, n_pages, 0, pgprot);
2678 if (pages != stack_pages)
2684 /* get, pin, and map the pages of the object into kernel space */
2685 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2686 enum i915_map_type type)
2688 enum i915_map_type has_type;
2693 if (unlikely(!i915_gem_object_has_struct_page(obj)))
2694 return ERR_PTR(-ENXIO);
2696 ret = mutex_lock_interruptible(&obj->mm.lock);
2698 return ERR_PTR(ret);
2700 pinned = !(type & I915_MAP_OVERRIDE);
2701 type &= ~I915_MAP_OVERRIDE;
2703 if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2704 if (unlikely(!i915_gem_object_has_pages(obj))) {
2705 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2707 ret = ____i915_gem_object_get_pages(obj);
2711 smp_mb__before_atomic();
2713 atomic_inc(&obj->mm.pages_pin_count);
2716 GEM_BUG_ON(!i915_gem_object_has_pages(obj));
2718 ptr = page_unpack_bits(obj->mm.mapping, &has_type);
2719 if (ptr && has_type != type) {
2725 if (is_vmalloc_addr(ptr))
2728 kunmap(kmap_to_page(ptr));
2730 ptr = obj->mm.mapping = NULL;
2734 ptr = i915_gem_object_map(obj, type);
2740 obj->mm.mapping = page_pack_bits(ptr, type);
2744 mutex_unlock(&obj->mm.lock);
2748 atomic_dec(&obj->mm.pages_pin_count);
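/*
 * Illustrative use of the mapping API above (sketch only): map once, write
 * through the returned pointer, flush the written range and unpin.
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	__i915_gem_object_flush_map(obj, 0, len);
 *	i915_gem_object_unpin_map(obj);
 */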
2754 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
2755 unsigned long offset,
2758 enum i915_map_type has_type;
2761 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
2762 GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
2763 offset, size, obj->base.size));
2765 obj->mm.dirty = true;
2767 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
2770 ptr = page_unpack_bits(obj->mm.mapping, &has_type);
2771 if (has_type == I915_MAP_WC)
2774 drm_clflush_virt_range(ptr + offset, size);
2775 if (size == obj->base.size) {
2776 obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
2777 obj->cache_dirty = false;
2782 i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2783 const struct drm_i915_gem_pwrite *arg)
2785 struct address_space *mapping = obj->base.filp->f_mapping;
2786 char __user *user_data = u64_to_user_ptr(arg->data_ptr);
2790 /* Before we instantiate/pin the backing store for our use, we
2791 * can prepopulate the shmemfs filp efficiently using a write into
2792 * the pagecache. We avoid the penalty of instantiating all the
2793 * pages, important if the user is just writing to a few and never
2794 * uses the object on the GPU, and using a direct write into shmemfs
2795 * allows it to avoid the cost of retrieving a page (either swapin
2796 * or clearing-before-use) before it is overwritten.
2798 if (i915_gem_object_has_pages(obj))
2801 if (obj->mm.madv != I915_MADV_WILLNEED)
2804 /* Before the pages are instantiated the object is treated as being
2805 * in the CPU domain. The pages will be clflushed as required before
2806 * use, and we can freely write into the pages directly. If userspace
2807 * races pwrite with any other operation, corruption will ensue -
2808 * that is userspace's prerogative!
2812 offset = arg->offset;
2813 pg = offset_in_page(offset);
2816 unsigned int len, unwritten;
2821 len = PAGE_SIZE - pg;
2825 err = pagecache_write_begin(obj->base.filp, mapping,
2832 unwritten = copy_from_user(vaddr + pg, user_data, len);
2835 err = pagecache_write_end(obj->base.filp, mapping,
2836 offset, len, len - unwritten,
2854 i915_gem_retire_work_handler(struct work_struct *work)
2856 struct drm_i915_private *dev_priv =
2857 container_of(work, typeof(*dev_priv), gt.retire_work.work);
2858 struct drm_device *dev = &dev_priv->drm;
2860 /* Come back later if the device is busy... */
2861 if (mutex_trylock(&dev->struct_mutex)) {
2862 i915_retire_requests(dev_priv);
2863 mutex_unlock(&dev->struct_mutex);
2867 * Keep the retire handler running until we are finally idle.
2868 * We do not need to do this test under locking as in the worst-case
2869 * we queue the retire worker once too often.
2871 if (READ_ONCE(dev_priv->gt.awake))
2872 queue_delayed_work(dev_priv->wq,
2873 &dev_priv->gt.retire_work,
2874 round_jiffies_up_relative(HZ));
2877 static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
2883 * Even if we fail to switch, give whatever is running a small chance
2884 * to save itself before we report the failure. Yes, this may be a
2885 * false positive due to e.g. ENOMEM, caveat emptor!
2887 if (i915_gem_switch_to_kernel_context(i915, mask))
2890 if (i915_gem_wait_for_idle(i915,
2892 I915_WAIT_FOR_IDLE_BOOST,
2893 I915_GEM_IDLE_TIMEOUT))
2897 if (i915_modparams.reset) { /* XXX hide warning from gem_eio */
2898 dev_err(i915->drm.dev,
2899 "Failed to idle engines, declaring wedged!\n");
2903 /* Forcibly cancel outstanding work and leave the gpu quiet. */
2904 i915_gem_set_wedged(i915);
2907 i915_retire_requests(i915); /* ensure we flush after wedging */
2911 static bool load_power_context(struct drm_i915_private *i915)
2913 /* Force loading the kernel context on all engines */
2914 if (!switch_to_kernel_context_sync(i915, ALL_ENGINES))
2918 * Immediately park the GPU so that we enable powersaving and
2919 * treat it as idle. The next time we issue a request, we will
2920 * unpark and start using the engine->pinned_default_state, otherwise
2921 * it is in limbo and an early reset may fail.
2923 __i915_gem_park(i915);
2929 i915_gem_idle_work_handler(struct work_struct *work)
2931 struct drm_i915_private *i915 =
2932 container_of(work, typeof(*i915), gt.idle_work.work);
2933 bool rearm_hangcheck;
2935 if (!READ_ONCE(i915->gt.awake))
2938 if (READ_ONCE(i915->gt.active_requests))
2942 cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
2944 if (!mutex_trylock(&i915->drm.struct_mutex)) {
2945 /* Currently busy, come back later */
2946 mod_delayed_work(i915->wq,
2947 &i915->gt.idle_work,
2948 msecs_to_jiffies(50));
2953 * Flush out the last user context, leaving only the pinned
2954 * kernel context resident. Should anything unfortunate happen
2955 * while we are idle (such as the GPU being power cycled), no users will be harmed.
2958 if (!work_pending(&i915->gt.idle_work.work) &&
2959 !i915->gt.active_requests) {
2960 ++i915->gt.active_requests; /* don't requeue idle */
2962 switch_to_kernel_context_sync(i915, i915->gt.active_engines);
2964 if (!--i915->gt.active_requests) {
2965 __i915_gem_park(i915);
2966 rearm_hangcheck = false;
2970 mutex_unlock(&i915->drm.struct_mutex);
2973 if (rearm_hangcheck) {
2974 GEM_BUG_ON(!i915->gt.awake);
2975 i915_queue_hangcheck(i915);
2979 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
2981 struct drm_i915_private *i915 = to_i915(gem->dev);
2982 struct drm_i915_gem_object *obj = to_intel_bo(gem);
2983 struct drm_i915_file_private *fpriv = file->driver_priv;
2984 struct i915_lut_handle *lut, *ln;
2986 mutex_lock(&i915->drm.struct_mutex);
2988 list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
2989 struct i915_gem_context *ctx = lut->ctx;
2990 struct i915_vma *vma;
2992 GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
2993 if (ctx->file_priv != fpriv)
2996 vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
2997 GEM_BUG_ON(vma->obj != obj);
2999 /* We allow the process to have multiple handles to the same
3000 * vma, in the same fd namespace, by virtue of flink/open.
3002 GEM_BUG_ON(!vma->open_count);
3003 if (!--vma->open_count && !i915_vma_is_ggtt(vma))
3004 i915_vma_close(vma);
3006 list_del(&lut->obj_link);
3007 list_del(&lut->ctx_link);
3009 i915_lut_handle_free(lut);
3010 __i915_gem_object_release_unless_active(obj);
3013 mutex_unlock(&i915->drm.struct_mutex);
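/*
 * Convert the gem_wait uAPI timeout (nanoseconds) into scheduler jiffies:
 * per the uAPI a negative value means wait indefinitely and becomes
 * MAX_SCHEDULE_TIMEOUT, zero means do not block at all, and any positive
 * value is translated with nsecs_to_jiffies_timeout().
 */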
3016 static unsigned long to_wait_timeout(s64 timeout_ns)
3019 return MAX_SCHEDULE_TIMEOUT;
3021 if (timeout_ns == 0)
3024 return nsecs_to_jiffies_timeout(timeout_ns);
3028 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
3029 * @dev: drm device pointer
3030 * @data: ioctl data blob
3031 * @file: drm file pointer
3033 * Returns 0 if successful, else an error is returned with the remaining time in
3034 * the timeout parameter.
3035 * -ETIME: object is still busy after timeout
3036 * -ERESTARTSYS: signal interrupted the wait
3037 * -ENOENT: object doesn't exist
3038 * Also possible, but rare:
3039 * -EAGAIN: incomplete, restart syscall
3041 * -ENODEV: Internal IRQ fail
3042 * -E?: The add request failed
3044 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3045 * non-zero timeout parameter the wait ioctl will wait for the given number of
3046 * nanoseconds on an object becoming unbusy. Since the wait itself does so
3047 * without holding struct_mutex the object may become re-busied before this
3048 * function completes. A similar but shorter race condition exists in the busy ioctl.
3052 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3054 struct drm_i915_gem_wait *args = data;
3055 struct drm_i915_gem_object *obj;
3059 if (args->flags != 0)
3062 obj = i915_gem_object_lookup(file, args->bo_handle);
3066 start = ktime_get();
3068 ret = i915_gem_object_wait(obj,
3069 I915_WAIT_INTERRUPTIBLE |
3070 I915_WAIT_PRIORITY |
3072 to_wait_timeout(args->timeout_ns));
3074 if (args->timeout_ns > 0) {
3075 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
3076 if (args->timeout_ns < 0)
3077 args->timeout_ns = 0;
3080 * Apparently ktime isn't accurate enough and occasionally has a
3081 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
3082 * things up to make the test happy. We allow up to 1 jiffy.
3084 * This is a regression from the timespec->ktime conversion.
3086 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
3087 args->timeout_ns = 0;
3089 /* Asked to wait beyond the jiffie/scheduler precision? */
3090 if (ret == -ETIME && args->timeout_ns)
3094 i915_gem_object_put(obj);
3098 static int wait_for_engines(struct drm_i915_private *i915)
3100 if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
3101 dev_err(i915->drm.dev,
3102 "Failed to idle engines, declaring wedged!\n");
3104 i915_gem_set_wedged(i915);
3112 wait_for_timelines(struct drm_i915_private *i915,
3113 unsigned int flags, long timeout)
3115 struct i915_gt_timelines *gt = &i915->gt.timelines;
3116 struct i915_timeline *tl;
3118 if (!READ_ONCE(i915->gt.active_requests))
3121 mutex_lock(&gt->mutex);
3122 list_for_each_entry(tl, &gt->active_list, link) {
3123 struct i915_request *rq;
3125 rq = i915_active_request_get_unlocked(&tl->last_request);
3129 mutex_unlock(&gt->mutex);
3134 * Switching to the kernel context is often used as a synchronous
3135 * step prior to idling, e.g. in suspend for flushing all
3136 * current operations to memory before sleeping. These we
3137 * want to complete as quickly as possible to avoid prolonged
3138 * stalls, so allow the gpu to boost to maximum clocks.
3140 if (flags & I915_WAIT_FOR_IDLE_BOOST)
3143 timeout = i915_request_wait(rq, flags, timeout);
3144 i915_request_put(rq);
3148 /* restart after reacquiring the lock */
3149 mutex_lock(&gt->mutex);
3150 tl = list_entry(&gt->active_list, typeof(*tl), link);
3152 mutex_unlock(&gt->mutex);
3157 int i915_gem_wait_for_idle(struct drm_i915_private *i915,
3158 unsigned int flags, long timeout)
3160 GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
3161 flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
3162 timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
3164 /* If the device is asleep, we have no requests outstanding */
3165 if (!READ_ONCE(i915->gt.awake))
3168 timeout = wait_for_timelines(i915, flags, timeout);
3172 if (flags & I915_WAIT_LOCKED) {
3175 lockdep_assert_held(&i915->drm.struct_mutex);
3177 err = wait_for_engines(i915);
3181 i915_retire_requests(i915);
3187 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
3190 * We manually flush the CPU domain so that we can override and
3191 * force the flush for the display, and perform it asynchronously.
3193 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3194 if (obj->cache_dirty)
3195 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
3196 obj->write_domain = 0;
3199 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
3201 if (!READ_ONCE(obj->pin_global))
3204 mutex_lock(&obj->base.dev->struct_mutex);
3205 __i915_gem_object_flush_for_display(obj);
3206 mutex_unlock(&obj->base.dev->struct_mutex);
3210 * Moves a single object to the WC read, and possibly write domain.
3211 * @obj: object to act on
3212 * @write: ask for write access or read only
3214 * This function returns when the move is complete, including waiting on rendering.
3218 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
3222 lockdep_assert_held(&obj->base.dev->struct_mutex);
3224 ret = i915_gem_object_wait(obj,
3225 I915_WAIT_INTERRUPTIBLE |
3227 (write ? I915_WAIT_ALL : 0),
3228 MAX_SCHEDULE_TIMEOUT);
3232 if (obj->write_domain == I915_GEM_DOMAIN_WC)
3235 /* Flush and acquire obj->pages so that we are coherent through
3236 * direct access in memory with previous cached writes through
3237 * shmemfs and that our cache domain tracking remains valid.
3238 * For example, if the obj->filp was moved to swap without us
3239 * being notified and releasing the pages, we would mistakenly
3240 * continue to assume that the obj remained out of the CPU cached domain.
3243 ret = i915_gem_object_pin_pages(obj);
3247 flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
3249 /* Serialise direct access to this object with the barriers for
3250 * coherent writes from the GPU, by effectively invalidating the
3251 * WC domain upon first access.
3253 if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
3256 /* It should now be out of any other write domains, and we can update
3257 * the domain values for our changes.
3259 GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
3260 obj->read_domains |= I915_GEM_DOMAIN_WC;
3262 obj->read_domains = I915_GEM_DOMAIN_WC;
3263 obj->write_domain = I915_GEM_DOMAIN_WC;
3264 obj->mm.dirty = true;
3267 i915_gem_object_unpin_pages(obj);
3272 * Moves a single object to the GTT read, and possibly write domain.
3273 * @obj: object to act on
3274 * @write: ask for write access or read only
3276 * This function returns when the move is complete, including waiting on rendering.
3280 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3284 lockdep_assert_held(&obj->base.dev->struct_mutex);
3286 ret = i915_gem_object_wait(obj,
3287 I915_WAIT_INTERRUPTIBLE |
3289 (write ? I915_WAIT_ALL : 0),
3290 MAX_SCHEDULE_TIMEOUT);
3294 if (obj->write_domain == I915_GEM_DOMAIN_GTT)
3297 /* Flush and acquire obj->pages so that we are coherent through
3298 * direct access in memory with previous cached writes through
3299 * shmemfs and that our cache domain tracking remains valid.
3300 * For example, if the obj->filp was moved to swap without us
3301 * being notified and releasing the pages, we would mistakenly
3302 * continue to assume that the obj remained out of the CPU cached domain.
3305 ret = i915_gem_object_pin_pages(obj);
3309 flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
3311 /* Serialise direct access to this object with the barriers for
3312 * coherent writes from the GPU, by effectively invalidating the
3313 * GTT domain upon first access.
3315 if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
3318 /* It should now be out of any other write domains, and we can update
3319 * the domain values for our changes.
3321 GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3322 obj->read_domains |= I915_GEM_DOMAIN_GTT;
3324 obj->read_domains = I915_GEM_DOMAIN_GTT;
3325 obj->write_domain = I915_GEM_DOMAIN_GTT;
3326 obj->mm.dirty = true;
3329 i915_gem_object_unpin_pages(obj);
3334 * Changes the cache-level of an object across all VMA.
3335 * @obj: object to act on
3336 * @cache_level: new cache level to set for the object
3338 * After this function returns, the object will be in the new cache-level
3339 * across all GTT and the contents of the backing storage will be coherent,
3340 * with respect to the new cache-level. In order to keep the backing storage
3341 * coherent for all users, we only allow a single cache level to be set
3342 * globally on the object and prevent it from being changed whilst the
3343 * hardware is reading from the object. That is if the object is currently
3344 * on the scanout it will be set to uncached (or equivalent display
3345 * cache coherency) and all non-MOCS GPU access will also be uncached so
3346 * that all direct access to the scanout remains coherent.
3348 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3349 enum i915_cache_level cache_level)
3351 struct i915_vma *vma;
3354 lockdep_assert_held(&obj->base.dev->struct_mutex);
3356 if (obj->cache_level == cache_level)
3359 /* Inspect the list of currently bound VMA and unbind any that would
3360 * be invalid given the new cache-level. This is principally to
3361 * catch the issue of the CS prefetch crossing page boundaries and
3362 * reading an invalid PTE on older architectures.
3365 list_for_each_entry(vma, &obj->vma.list, obj_link) {
3366 if (!drm_mm_node_allocated(&vma->node))
3369 if (i915_vma_is_pinned(vma)) {
3370 DRM_DEBUG("can not change the cache level of pinned objects\n");
3374 if (!i915_vma_is_closed(vma) &&
3375 i915_gem_valid_gtt_space(vma, cache_level))
3378 ret = i915_vma_unbind(vma);
3382 /* As unbinding may affect other elements in the
3383 * obj->vma_list (due to side-effects from retiring
3384 * an active vma), play safe and restart the iterator.
3389 /* We can reuse the existing drm_mm nodes but need to change the
3390 * cache-level on the PTE. We could simply unbind them all and
3391 * rebind with the correct cache-level on next use. However since
3392 * we already have a valid slot, dma mapping, pages etc, we may as well
3393 * rewrite the PTE in the belief that doing so tramples upon less
3394 * state and so involves less work.
3396 if (obj->bind_count) {
3397 /* Before we change the PTE, the GPU must not be accessing it.
3398 * If we wait upon the object, we know that all the bound
3399 * VMA are no longer active.
3401 ret = i915_gem_object_wait(obj,
3402 I915_WAIT_INTERRUPTIBLE |
3405 MAX_SCHEDULE_TIMEOUT);
3409 if (!HAS_LLC(to_i915(obj->base.dev)) &&
3410 cache_level != I915_CACHE_NONE) {
3411 /* Access to snoopable pages through the GTT is
3412 * incoherent and on some machines causes a hard
3413 * lockup. Relinquish the CPU mmapping to force
3414 * userspace to refault in the pages and we can
3415 * then double check if the GTT mapping is still
3416 * valid for that pointer access.
3418 i915_gem_release_mmap(obj);
3420 /* As we no longer need a fence for GTT access,
3421 * we can relinquish it now (and so prevent having
3422 * to steal a fence from someone else on the next
3423 * fence request). Note GPU activity would have
3424 * dropped the fence as all snoopable access is
3425 * supposed to be linear.
3427 for_each_ggtt_vma(vma, obj) {
3428 ret = i915_vma_put_fence(vma);
3433 /* We either have incoherent backing store and
3434 * so no GTT access or the architecture is fully
3435 * coherent. In such cases, existing GTT mmaps
3436 * ignore the cache bit in the PTE and we can
3437 * rewrite it without confusing the GPU or having
3438 * to force userspace to fault back in its mmaps.
3442 list_for_each_entry(vma, &obj->vma.list, obj_link) {
3443 if (!drm_mm_node_allocated(&vma->node))
3446 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3452 list_for_each_entry(vma, &obj->vma.list, obj_link)
3453 vma->node.color = cache_level;
3454 i915_gem_object_set_cache_coherency(obj, cache_level);
3455 obj->cache_dirty = true; /* Always invalidate stale cachelines */
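/*
 * Report the object's cache level back to userspace in get/set_caching
 * uAPI terms: LLC and L3_LLC are reported as I915_CACHING_CACHED, the
 * write-through level as I915_CACHING_DISPLAY, and anything else as
 * I915_CACHING_NONE.
 */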
3460 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3461 struct drm_file *file)
3463 struct drm_i915_gem_caching *args = data;
3464 struct drm_i915_gem_object *obj;
3468 obj = i915_gem_object_lookup_rcu(file, args->handle);
3474 switch (obj->cache_level) {
3475 case I915_CACHE_LLC:
3476 case I915_CACHE_L3_LLC:
3477 args->caching = I915_CACHING_CACHED;
3481 args->caching = I915_CACHING_DISPLAY;
3485 args->caching = I915_CACHING_NONE;
3493 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3494 struct drm_file *file)
3496 struct drm_i915_private *i915 = to_i915(dev);
3497 struct drm_i915_gem_caching *args = data;
3498 struct drm_i915_gem_object *obj;
3499 enum i915_cache_level level;
3502 switch (args->caching) {
3503 case I915_CACHING_NONE:
3504 level = I915_CACHE_NONE;
3506 case I915_CACHING_CACHED:
3508 * Due to a HW issue on BXT A stepping, GPU stores via a
3509 * snooped mapping may leave stale data in a corresponding CPU
3510 * cacheline, whereas normally such cachelines would get invalidated.
3513 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
3516 level = I915_CACHE_LLC;
3518 case I915_CACHING_DISPLAY:
3519 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
3525 obj = i915_gem_object_lookup(file, args->handle);
3530 * The caching mode of proxy object is handled by its generator, and
3531 * not allowed to be changed by userspace.
3533 if (i915_gem_object_is_proxy(obj)) {
3538 if (obj->cache_level == level)
3541 ret = i915_gem_object_wait(obj,
3542 I915_WAIT_INTERRUPTIBLE,
3543 MAX_SCHEDULE_TIMEOUT);
3547 ret = i915_mutex_lock_interruptible(dev);
3551 ret = i915_gem_object_set_cache_level(obj, level);
3552 mutex_unlock(&dev->struct_mutex);
3555 i915_gem_object_put(obj);
3560 * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
3561 * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
3562 * (for pageflips). We only flush the caches while preparing the buffer for
3563 * display, the callers are responsible for frontbuffer flush.
3566 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3568 const struct i915_ggtt_view *view,
3571 struct i915_vma *vma;
3574 lockdep_assert_held(&obj->base.dev->struct_mutex);
3576 /* Mark the global pin early so that we account for the
3577 * display coherency whilst setting up the cache domains.
3581 /* The display engine is not coherent with the LLC cache on gen6. As
3582 * a result, we make sure that the pinning that is about to occur is
3583 * done with uncached PTEs. This is lowest common denominator for all chipsets.
3586 * However for gen6+, we could do better by using the GFDT bit instead
3587 * of uncaching, which would allow us to flush all the LLC-cached data
3588 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3590 ret = i915_gem_object_set_cache_level(obj,
3591 HAS_WT(to_i915(obj->base.dev)) ?
3592 I915_CACHE_WT : I915_CACHE_NONE);
3595 goto err_unpin_global;
3598 /* As the user may map the buffer once pinned in the display plane
3599 * (e.g. libkms for the bootup splash), we have to ensure that we
3600 * always use map_and_fenceable for all scanout buffers. However,
3601 * it may simply be too big to fit into mappable, in which case
3602 * put it anyway and hope that userspace can cope (but always first
3603 * try to preserve the existing ABI).
3605 vma = ERR_PTR(-ENOSPC);
3606 if ((flags & PIN_MAPPABLE) == 0 &&
3607 (!view || view->type == I915_GGTT_VIEW_NORMAL))
3608 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
3613 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
3615 goto err_unpin_global;
3617 vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
3619 __i915_gem_object_flush_for_display(obj);
3621 /* It should now be out of any other write domains, and we can update
3622 * the domain values for our changes.
3624 obj->read_domains |= I915_GEM_DOMAIN_GTT;
3634 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
3636 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
3638 if (WARN_ON(vma->obj->pin_global == 0))
3641 if (--vma->obj->pin_global == 0)
3642 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
3644 /* Bump the LRU to try and avoid premature eviction whilst flipping */
3645 i915_gem_object_bump_inactive_ggtt(vma->obj);
3647 i915_vma_unpin(vma);
3651 * Moves a single object to the CPU read, and possibly write domain.
3652 * @obj: object to act on
3653 * @write: requesting write or read-only access
3655 * This function returns when the move is complete, including waiting on rendering.
3659 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3663 lockdep_assert_held(&obj->base.dev->struct_mutex);
3665 ret = i915_gem_object_wait(obj,
3666 I915_WAIT_INTERRUPTIBLE |
3668 (write ? I915_WAIT_ALL : 0),
3669 MAX_SCHEDULE_TIMEOUT);
3673 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3675 /* Flush the CPU cache if it's still invalid. */
3676 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3677 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
3678 obj->read_domains |= I915_GEM_DOMAIN_CPU;
3681 /* It should now be out of any other write domains, and we can update
3682 * the domain values for our changes.
3684 GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
3686 /* If we're writing through the CPU, then the GPU read domains will
3687 * need to be invalidated at next use.
3690 __start_cpu_write(obj);
3695 /* Throttle our rendering by waiting until the ring has completed our requests
3696 * emitted over 20 msec ago.
3698 * Note that if we were to use the current jiffies each time around the loop,
3699 * we wouldn't escape the function with any frames outstanding if the time to
3700 * render a frame was over 20ms.
3702 * This should get us reasonable parallelism between CPU and GPU but also
3703 * relatively low latency when blocking on a particular request to finish.
3706 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3708 struct drm_i915_private *dev_priv = to_i915(dev);
3709 struct drm_i915_file_private *file_priv = file->driver_priv;
3710 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3711 struct i915_request *request, *target = NULL;
3714 /* ABI: return -EIO if already wedged */
3715 ret = i915_terminally_wedged(dev_priv);
3719 spin_lock(&file_priv->mm.lock);
3720 list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
3721 if (time_after_eq(request->emitted_jiffies, recent_enough))
3725 list_del(&target->client_link);
3726 target->file_priv = NULL;
3732 i915_request_get(target);
3733 spin_unlock(&file_priv->mm.lock);
3738 ret = i915_request_wait(target,
3739 I915_WAIT_INTERRUPTIBLE,
3740 MAX_SCHEDULE_TIMEOUT);
3741 i915_request_put(target);
3743 return ret < 0 ? ret : 0;
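/*
 * Sketch of a typical caller of the helper below, mirroring the
 * pin_to_display_plane() fallback above: optimistically ask for a mappable
 * slot without blocking, then retry unconstrained if that fails.
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
 *				       PIN_MAPPABLE | PIN_NONBLOCK);
 *	if (IS_ERR(vma))
 *		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
 */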
3747 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
3748 const struct i915_ggtt_view *view,
3753 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3754 struct i915_address_space *vm = &dev_priv->ggtt.vm;
3755 struct i915_vma *vma;
3758 lockdep_assert_held(&obj->base.dev->struct_mutex);
3760 if (flags & PIN_MAPPABLE &&
3761 (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
3762 /* If the required space is larger than the available
3763 * aperture, we will not be able to find a slot for the
3764 * object and unbinding the object now will be in
3765 * vain. Worse, doing so may cause us to ping-pong
3766 * the object in and out of the Global GTT and
3767 * waste a lot of cycles under the mutex.
3769 if (obj->base.size > dev_priv->ggtt.mappable_end)
3770 return ERR_PTR(-E2BIG);
3772 /* If NONBLOCK is set the caller is optimistically
3773 * trying to cache the full object within the mappable
3774 * aperture, and *must* have a fallback in place for
3775 * situations where we cannot bind the object. We
3776 * can be a little more lax here and use the fallback
3777 * more often to avoid costly migrations of ourselves
3778 * and other objects within the aperture.
3780 * Half-the-aperture is used as a simple heuristic.
3781 * More interesting would be to do a search for a free
3782 * block prior to making the commitment to unbind.
3783 * That caters for the self-harm case, and with a
3784 * little more heuristics (e.g. NOFAULT, NOEVICT)
3785 * we could try to minimise harm to others.
3787 if (flags & PIN_NONBLOCK &&
3788 obj->base.size > dev_priv->ggtt.mappable_end / 2)
3789 return ERR_PTR(-ENOSPC);
3792 vma = i915_vma_instance(obj, vm, view);
3796 if (i915_vma_misplaced(vma, size, alignment, flags)) {
3797 if (flags & PIN_NONBLOCK) {
3798 if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
3799 return ERR_PTR(-ENOSPC);
3801 if (flags & PIN_MAPPABLE &&
3802 vma->fence_size > dev_priv->ggtt.mappable_end / 2)
3803 return ERR_PTR(-ENOSPC);
3806 WARN(i915_vma_is_pinned(vma),
3807 "bo is already pinned in ggtt with incorrect alignment:"
3808 " offset=%08x, req.alignment=%llx,"
3809 " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
3810 i915_ggtt_offset(vma), alignment,
3811 !!(flags & PIN_MAPPABLE),
3812 i915_vma_is_map_and_fenceable(vma));
3813 ret = i915_vma_unbind(vma);
3815 return ERR_PTR(ret);
3818 ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
3820 return ERR_PTR(ret);
3825 static __always_inline unsigned int __busy_read_flag(unsigned int id)
3827 if (id == I915_ENGINE_CLASS_INVALID)
3830 GEM_BUG_ON(id >= 16);
3831 return 0x10000 << id;
3834 static __always_inline unsigned int __busy_write_id(unsigned int id)
3837 * The uABI guarantees an active writer is also amongst the read
3838 * engines. This would be true if we accessed the activity tracking
3839 * under the lock, but as we perform the lookup of the object and
3840 * its activity locklessly we can not guarantee that the last_write
3841 * being active implies that we have set the same engine flag from
3842 * last_read - hence we always set both read and write busy for last_write.
3845 if (id == I915_ENGINE_CLASS_INVALID)
3848 return (id + 1) | __busy_read_flag(id);
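/*
 * Taken together, the two helpers above encode the busy-ioctl result:
 * the low 16 bits carry the engine class of the active writer plus one
 * (zero meaning no writer), while the high 16 bits form a bitmask with
 * one bit per engine class that currently has an active reader.
 */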
3851 static __always_inline unsigned int
3852 __busy_set_if_active(const struct dma_fence *fence,
3853 unsigned int (*flag)(unsigned int id))
3855 const struct i915_request *rq;
3858 * We have to check the current hw status of the fence as the uABI
3859 * guarantees forward progress. We could rely on the idle worker
3860 * to eventually flush us, but to minimise latency just ask the
3863 * Note we only report on the status of native fences.
3865 if (!dma_fence_is_i915(fence))
3868 /* opencode to_request() in order to avoid const warnings */
3869 rq = container_of(fence, const struct i915_request, fence);
3870 if (i915_request_completed(rq))
3873 return flag(rq->engine->uabi_class);
3876 static __always_inline unsigned int
3877 busy_check_reader(const struct dma_fence *fence)
3879 return __busy_set_if_active(fence, __busy_read_flag);
3882 static __always_inline unsigned int
3883 busy_check_writer(const struct dma_fence *fence)
3888 return __busy_set_if_active(fence, __busy_write_id);
3892 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3893 struct drm_file *file)
3895 struct drm_i915_gem_busy *args = data;
3896 struct drm_i915_gem_object *obj;
3897 struct reservation_object_list *list;
3903 obj = i915_gem_object_lookup_rcu(file, args->handle);
3908 * A discrepancy here is that we do not report the status of
3909 * non-i915 fences, i.e. even though we may report the object as idle,
3910 * a call to set-domain may still stall waiting for foreign rendering.
3911 * This also means that wait-ioctl may report an object as busy,
3912 * where busy-ioctl considers it idle.
3914 * We trade the ability to warn of foreign fences to report on which
3915 * i915 engines are active for the object.
3917 * Alternatively, we can trade that extra information on read/write
3918 * activity with
3919 * args->busy =
3920 * !reservation_object_test_signaled_rcu(obj->resv, true);
3921 * to report the overall busyness. This is what the wait-ioctl does.
3925 seq = raw_read_seqcount(&obj->resv->seq);
3927 /* Translate the exclusive fence to the READ *and* WRITE engine */
3928 args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
3930 /* Translate shared fences to READ set of engines */
3931 list = rcu_dereference(obj->resv->fence);
3933 unsigned int shared_count = list->shared_count, i;
3935 for (i = 0; i < shared_count; ++i) {
3936 struct dma_fence *fence =
3937 rcu_dereference(list->shared[i]);
3939 args->busy |= busy_check_reader(fence);
3943 if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
3953 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
3954 struct drm_file *file_priv)
3956 return i915_gem_ring_throttle(dev, file_priv);
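/*
 * The madvise ioctl below lets userspace mark an object's backing store as
 * I915_MADV_DONTNEED (purgeable, so the shrinker may discard it under
 * memory pressure, or it is truncated immediately if no pages are attached)
 * or I915_MADV_WILLNEED (keep). args->retained reports whether the backing
 * store is still present, i.e. has not already been purged.
 */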
3960 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
3961 struct drm_file *file_priv)
3963 struct drm_i915_private *dev_priv = to_i915(dev);
3964 struct drm_i915_gem_madvise *args = data;
3965 struct drm_i915_gem_object *obj;
3968 switch (args->madv) {
3969 case I915_MADV_DONTNEED:
3970 case I915_MADV_WILLNEED:
3976 obj = i915_gem_object_lookup(file_priv, args->handle);
3980 err = mutex_lock_interruptible(&obj->mm.lock);
3984 if (i915_gem_object_has_pages(obj) &&
3985 i915_gem_object_is_tiled(obj) &&
3986 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
3987 if (obj->mm.madv == I915_MADV_WILLNEED) {
3988 GEM_BUG_ON(!obj->mm.quirked);
3989 __i915_gem_object_unpin_pages(obj);
3990 obj->mm.quirked = false;
3992 if (args->madv == I915_MADV_WILLNEED) {
3993 GEM_BUG_ON(obj->mm.quirked);
3994 __i915_gem_object_pin_pages(obj);
3995 obj->mm.quirked = true;
3999 if (obj->mm.madv != __I915_MADV_PURGED)
4000 obj->mm.madv = args->madv;
4002 /* if the object is no longer attached, discard its backing storage */
4003 if (obj->mm.madv == I915_MADV_DONTNEED &&
4004 !i915_gem_object_has_pages(obj))
4005 i915_gem_object_truncate(obj);
4007 args->retained = obj->mm.madv != __I915_MADV_PURGED;
4008 mutex_unlock(&obj->mm.lock);
4011 i915_gem_object_put(obj);
4016 frontbuffer_retire(struct i915_active_request *active,
4017 struct i915_request *request)
4019 struct drm_i915_gem_object *obj =
4020 container_of(active, typeof(*obj), frontbuffer_write);
4022 intel_fb_obj_flush(obj, ORIGIN_CS);
4025 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4026 const struct drm_i915_gem_object_ops *ops)
4028 mutex_init(&obj->mm.lock);
4030 spin_lock_init(&obj->vma.lock);
4031 INIT_LIST_HEAD(&obj->vma.list);
4033 INIT_LIST_HEAD(&obj->lut_list);
4034 INIT_LIST_HEAD(&obj->batch_pool_link);
4036 init_rcu_head(&obj->rcu);
4040 reservation_object_init(&obj->__builtin_resv);
4041 obj->resv = &obj->__builtin_resv;
4043 obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
4044 i915_active_request_init(&obj->frontbuffer_write,
4045 NULL, frontbuffer_retire);
4047 obj->mm.madv = I915_MADV_WILLNEED;
4048 INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
4049 mutex_init(&obj->mm.get_page.lock);
4051 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4054 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4055 .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
4056 I915_GEM_OBJECT_IS_SHRINKABLE,
4058 .get_pages = i915_gem_object_get_pages_gtt,
4059 .put_pages = i915_gem_object_put_pages_gtt,
4061 .pwrite = i915_gem_object_pwrite_gtt,
4064 static int i915_gem_object_create_shmem(struct drm_device *dev,
4065 struct drm_gem_object *obj,
4068 struct drm_i915_private *i915 = to_i915(dev);
4069 unsigned long flags = VM_NORESERVE;
4072 drm_gem_private_object_init(dev, obj, size);
4075 filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
4078 filp = shmem_file_setup("i915", size, flags);
4081 return PTR_ERR(filp);
4088 struct drm_i915_gem_object *
4089 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
4091 struct drm_i915_gem_object *obj;
4092 struct address_space *mapping;
4093 unsigned int cache_level;
4097 /* There is a prevalence of the assumption that we fit the object's
4098 * page count inside a 32bit _signed_ variable. Let's document this and
4099 * catch if we ever need to fix it. In the meantime, if you do spot
4100 * such a local variable, please consider fixing!
4102 if (size >> PAGE_SHIFT > INT_MAX)
4103 return ERR_PTR(-E2BIG);
4105 if (overflows_type(size, obj->base.size))
4106 return ERR_PTR(-E2BIG);
4108 obj = i915_gem_object_alloc();
4110 return ERR_PTR(-ENOMEM);
4112 ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
4116 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4117 if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
4118 /* 965gm cannot relocate objects above 4GiB. */
4119 mask &= ~__GFP_HIGHMEM;
4120 mask |= __GFP_DMA32;
4123 mapping = obj->base.filp->f_mapping;
4124 mapping_set_gfp_mask(mapping, mask);
4125 GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
4127 i915_gem_object_init(obj, &i915_gem_object_ops);
4129 obj->write_domain = I915_GEM_DOMAIN_CPU;
4130 obj->read_domains = I915_GEM_DOMAIN_CPU;
4132 if (HAS_LLC(dev_priv))
4133 /* On some devices, we can have the GPU use the LLC (the CPU
4134 * cache) for about a 10% performance improvement
4135 * compared to uncached. Graphics requests other than
4136 * display scanout are coherent with the CPU in
4137 * accessing this cache. This means in this mode we
4138 * don't need to clflush on the CPU side, and on the
4139 * GPU side we only need to flush internal caches to
4140 * get data visible to the CPU.
4142 * However, we maintain the display planes as UC, and so
4143 * need to rebind when first used as such.
4145 cache_level = I915_CACHE_LLC;
4147 cache_level = I915_CACHE_NONE;
4149 i915_gem_object_set_cache_coherency(obj, cache_level);
4151 trace_i915_gem_object_create(obj);
4156 i915_gem_object_free(obj);
4157 return ERR_PTR(ret);
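/*
 * Minimal usage sketch for the constructor above (illustrative only):
 * create a shmem-backed object and drop the reference when done.
 *
 *	obj = i915_gem_object_create(i915, PAGE_SIZE);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	i915_gem_object_put(obj);
 */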
4160 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4162 /* If we are the last user of the backing storage (be it shmemfs
4163 * pages or stolen etc), we know that the pages are going to be
4164 * immediately released. In this case, we can then skip copying
4165 * back the contents from the GPU.
4168 if (obj->mm.madv != I915_MADV_WILLNEED)
4171 if (obj->base.filp == NULL)
4174 /* At first glance, this looks racy, but then again so would be
4175 * userspace racing mmap against close. However, the first external
4176 * reference to the filp can only be obtained through the
4177 * i915_gem_mmap_ioctl() which safeguards us against the user
4178 * acquiring such a reference whilst we are in the middle of
4179 * freeing the object.
4181 return atomic_long_read(&obj->base.filp->f_count) == 1;
4184 static void __i915_gem_free_objects(struct drm_i915_private *i915,
4185 struct llist_node *freed)
4187 struct drm_i915_gem_object *obj, *on;
4188 intel_wakeref_t wakeref;
4190 wakeref = intel_runtime_pm_get(i915);
4191 llist_for_each_entry_safe(obj, on, freed, freed) {
4192 struct i915_vma *vma, *vn;
4194 trace_i915_gem_object_destroy(obj);
4196 mutex_lock(&i915->drm.struct_mutex);
4198 GEM_BUG_ON(i915_gem_object_is_active(obj));
4199 list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) {
4200 GEM_BUG_ON(i915_vma_is_active(vma));
4201 vma->flags &= ~I915_VMA_PIN_MASK;
4202 i915_vma_destroy(vma);
4204 GEM_BUG_ON(!list_empty(&obj->vma.list));
4205 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree));
4207 /* This serializes freeing with the shrinker. Since the free
4208 * is delayed, first by RCU then by the workqueue, we want the
4209 * shrinker to be able to free pages of unreferenced objects,
4210 * or else we may oom whilst there are plenty of deferred objects which we can reap.
4213 if (i915_gem_object_has_pages(obj)) {
4214 spin_lock(&i915->mm.obj_lock);
4215 list_del_init(&obj->mm.link);
4216 spin_unlock(&i915->mm.obj_lock);
4219 mutex_unlock(&i915->drm.struct_mutex);
4221 GEM_BUG_ON(obj->bind_count);
4222 GEM_BUG_ON(obj->userfault_count);
4223 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
4224 GEM_BUG_ON(!list_empty(&obj->lut_list));
4226 if (obj->ops->release)
4227 obj->ops->release(obj);
4229 if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
4230 atomic_set(&obj->mm.pages_pin_count, 0);
4231 __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
4232 GEM_BUG_ON(i915_gem_object_has_pages(obj));
4234 if (obj->base.import_attach)
4235 drm_prime_gem_destroy(&obj->base, NULL);
4237 reservation_object_fini(&obj->__builtin_resv);
4238 drm_gem_object_release(&obj->base);
4239 i915_gem_info_remove_obj(i915, obj->base.size);
4241 bitmap_free(obj->bit_17);
4242 i915_gem_object_free(obj);
4244 GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
4245 atomic_dec(&i915->mm.free_count);
4250 intel_runtime_pm_put(i915, wakeref);
4253 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
4255 struct llist_node *freed;
4257 /* Free the oldest, most stale object to keep the free_list short */
4259 if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
4260 /* Only one consumer of llist_del_first() allowed */
4261 spin_lock(&i915->mm.free_lock);
4262 freed = llist_del_first(&i915->mm.free_list);
4263 spin_unlock(&i915->mm.free_lock);
4265 if (unlikely(freed)) {
4267 __i915_gem_free_objects(i915, freed);
4271 static void __i915_gem_free_work(struct work_struct *work)
4273 struct drm_i915_private *i915 =
4274 container_of(work, struct drm_i915_private, mm.free_work);
4275 struct llist_node *freed;
4278 * All file-owned VMA should have been released by this point through
4279 * i915_gem_close_object(), or earlier by i915_gem_context_close().
4280 * However, the object may also be bound into the global GTT (e.g.
4281 * older GPUs without per-process support, or for direct access through
4282 * the GTT either for the user or for scanout). Those VMA still need to be unbound now.
4286 spin_lock(&i915->mm.free_lock);
4287 while ((freed = llist_del_all(&i915->mm.free_list))) {
4288 spin_unlock(&i915->mm.free_lock);
4290 __i915_gem_free_objects(i915, freed);
4294 spin_lock(&i915->mm.free_lock);
4296 spin_unlock(&i915->mm.free_lock);
4299 static void __i915_gem_free_object_rcu(struct rcu_head *head)
4301 struct drm_i915_gem_object *obj =
4302 container_of(head, typeof(*obj), rcu);
4303 struct drm_i915_private *i915 = to_i915(obj->base.dev);
4306 * We reuse obj->rcu for the freed list, so we had better not treat
4307 * it like a rcu_head from this point forwards. And we expect all
4308 * objects to be freed via this path.
4310 destroy_rcu_head(&obj->rcu);
4313 * Since we require blocking on struct_mutex to unbind the freed
4314 * object from the GPU before releasing resources back to the
4315 * system, we can not do that directly from the RCU callback (which may
4316 * be a softirq context), but must instead then defer that work onto a
4317 * kthread. We use the RCU callback rather than move the freed object
4318 * directly onto the work queue so that we can mix between using the
4319 * worker and performing frees directly from subsequent allocations for
4320 * crude but effective memory throttling.
4322 if (llist_add(&obj->freed, &i915->mm.free_list))
4323 queue_work(i915->wq, &i915->mm.free_work);
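/*
 * Overview of the deferred free path: dropping the last GEM reference
 * calls i915_gem_free_object() below, which defers teardown via call_rcu()
 * into __i915_gem_free_object_rcu() above; that callback pushes the object
 * onto mm.free_list and kicks mm.free_work, and the worker (or
 * i915_gem_flush_free_objects()) finally unbinds and releases everything
 * under struct_mutex in __i915_gem_free_objects().
 */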
4326 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4328 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4330 if (obj->mm.quirked)
4331 __i915_gem_object_unpin_pages(obj);
4333 if (discard_backing_storage(obj))
4334 obj->mm.madv = I915_MADV_DONTNEED;
4337 * Before we free the object, make sure any pure RCU-only
4338 * read-side critical sections are complete, e.g.
4339 * i915_gem_busy_ioctl(). For the corresponding synchronized
4340 * lookup see i915_gem_object_lookup_rcu().
4342 atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
4343 call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
4346 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
4348 lockdep_assert_held(&obj->base.dev->struct_mutex);
4350 if (!i915_gem_object_has_active_reference(obj) &&
4351 i915_gem_object_is_active(obj))
4352 i915_gem_object_set_active_reference(obj);
4354 i915_gem_object_put(obj);
4357 void i915_gem_sanitize(struct drm_i915_private *i915)
4359 intel_wakeref_t wakeref;
4363 wakeref = intel_runtime_pm_get(i915);
4364 intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
4367 * As we have just resumed the machine and woken the device up from
4368 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
4369 * back to defaults, recovering from whatever wedged state we left it
4370 * in and so worth trying to use the device once more.
4372 if (i915_terminally_wedged(i915))
4373 i915_gem_unset_wedged(i915);
4376 * If we inherit context state from the BIOS or earlier occupants
4377 * of the GPU, the GPU may be in an inconsistent state when we
4378 * try to take over. The only way to remove the earlier state
4379 * is by resetting. However, resetting on earlier gen is tricky as
4380 * it may impact the display and we are uncertain about the stability
4381 * of the reset, so this could be applied to even earlier gen.
4383 intel_engines_sanitize(i915, false);
4385 intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
4386 intel_runtime_pm_put(i915, wakeref);
4388 mutex_lock(&i915->drm.struct_mutex);
4389 i915_gem_contexts_lost(i915);
4390 mutex_unlock(&i915->drm.struct_mutex);
4393 void i915_gem_suspend(struct drm_i915_private *i915)
4395 intel_wakeref_t wakeref;
4399 wakeref = intel_runtime_pm_get(i915);
4400 intel_suspend_gt_powersave(i915);
4402 flush_workqueue(i915->wq);
4404 mutex_lock(&i915->drm.struct_mutex);
4407 * We have to flush all the executing contexts to main memory so
4408 * that they can be saved in the hibernation image. To ensure the last
4409 * context image is coherent, we have to switch away from it. That
4410 * leaves the i915->kernel_context still active when
4411 * we actually suspend, and its image in memory may not match the GPU
4412 * state. Fortunately, the kernel_context is disposable and we do
4413 * not rely on its state.
4415 switch_to_kernel_context_sync(i915, i915->gt.active_engines);
4417 mutex_unlock(&i915->drm.struct_mutex);
4418 i915_reset_flush(i915);
4420 drain_delayed_work(&i915->gt.retire_work);
4423 * As the idle_work is rearming if it detects a race, play safe and
4424 * repeat the flush until it is definitely idle.
4426 drain_delayed_work(&i915->gt.idle_work);
4429 * Assert that we successfully flushed all the work and
4430 * reset the GPU back to its idle, low power state.
4432 GEM_BUG_ON(i915->gt.awake);
4434 intel_uc_suspend(i915);
4436 intel_runtime_pm_put(i915, wakeref);
4439 void i915_gem_suspend_late(struct drm_i915_private *i915)
4441 struct drm_i915_gem_object *obj;
4442 struct list_head *phases[] = {
4443 &i915->mm.unbound_list,
4444 &i915->mm.bound_list,
4449 * Neither the BIOS, ourselves nor any other kernel
4450 * expects the system to be in execlists mode on startup,
4451 * so we need to reset the GPU back to legacy mode. And the only
4452 * known way to disable logical contexts is through a GPU reset.
4454 * So in order to leave the system in a known default configuration,
4455 * always reset the GPU upon unload and suspend. Afterwards we then
4456 * clean up the GEM state tracking, flushing off the requests and
4457 * leaving the system in a known idle state.
4459 * Note that it is of the utmost importance that the GPU is idle and
4460 * all stray writes are flushed *before* we dismantle the backing
4461 * storage for the pinned objects.
4463 * However, since we are uncertain that resetting the GPU on older
4464 * machines is a good idea, we don't - just in case it leaves the
4465 * machine in an unusable condition.
4468 mutex_lock(&i915->drm.struct_mutex);
4469 for (phase = phases; *phase; phase++) {
4470 list_for_each_entry(obj, *phase, mm.link)
4471 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
4473 mutex_unlock(&i915->drm.struct_mutex);
4475 intel_uc_sanitize(i915);
4476 i915_gem_sanitize(i915);
4479 void i915_gem_resume(struct drm_i915_private *i915)
4483 WARN_ON(i915->gt.awake);
4485 mutex_lock(&i915->drm.struct_mutex);
4486 intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
4488 i915_gem_restore_gtt_mappings(i915);
4489 i915_gem_restore_fences(i915);
4492 * As we didn't flush the kernel context before suspend, we cannot
4493 * guarantee that the context image is complete. So let's just reset
4494 * it and start again.
4496 i915->gt.resume(i915);
4498 if (i915_gem_init_hw(i915))
4501 intel_uc_resume(i915);
4503 /* Always reload a context for powersaving. */
4504 if (!load_power_context(i915))
4508 intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
4509 mutex_unlock(&i915->drm.struct_mutex);
4513 if (!i915_reset_failed(i915)) {
4514 dev_err(i915->drm.dev,
4515 "Failed to re-initialize GPU, declaring it wedged!\n");
4516 i915_gem_set_wedged(i915);
4521 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
4523 if (INTEL_GEN(dev_priv) < 5 ||
4524 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4527 I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4528 DISP_TILE_SURFACE_SWIZZLING);
4530 if (IS_GEN(dev_priv, 5))
4533 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4534 if (IS_GEN(dev_priv, 6))
4535 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4536 else if (IS_GEN(dev_priv, 7))
4537 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4538 else if (IS_GEN(dev_priv, 8))
4539 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4544 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
4546 I915_WRITE(RING_CTL(base), 0);
4547 I915_WRITE(RING_HEAD(base), 0);
4548 I915_WRITE(RING_TAIL(base), 0);
4549 I915_WRITE(RING_START(base), 0);
4552 static void init_unused_rings(struct drm_i915_private *dev_priv)
4554 if (IS_I830(dev_priv)) {
4555 init_unused_ring(dev_priv, PRB1_BASE);
4556 init_unused_ring(dev_priv, SRB0_BASE);
4557 init_unused_ring(dev_priv, SRB1_BASE);
4558 init_unused_ring(dev_priv, SRB2_BASE);
4559 init_unused_ring(dev_priv, SRB3_BASE);
4560 } else if (IS_GEN(dev_priv, 2)) {
4561 init_unused_ring(dev_priv, SRB0_BASE);
4562 init_unused_ring(dev_priv, SRB1_BASE);
4563 } else if (IS_GEN(dev_priv, 3)) {
4564 init_unused_ring(dev_priv, PRB1_BASE);
4565 init_unused_ring(dev_priv, PRB2_BASE);
4569 static int __i915_gem_restart_engines(void *data)
4571 struct drm_i915_private *i915 = data;
4572 struct intel_engine_cs *engine;
4573 enum intel_engine_id id;
4576 for_each_engine(engine, i915, id) {
4577 err = engine->init_hw(engine);
4579 DRM_ERROR("Failed to restart %s (%d)\n",
4585 intel_engines_set_scheduler_caps(i915);
4590 int i915_gem_init_hw(struct drm_i915_private *dev_priv)
4594 dev_priv->gt.last_init_time = ktime_get();
4596 /* Double layer security blanket, see i915_gem_init() */
4597 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4599 if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
4600 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4602 if (IS_HASWELL(dev_priv))
4603 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
4604 LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4606 /* Apply the GT workarounds... */
4607 intel_gt_apply_workarounds(dev_priv);
4608 /* ...and determine whether they are sticking. */
4609 intel_gt_verify_workarounds(dev_priv, "init");
4611 i915_gem_init_swizzling(dev_priv);
4614 * At least 830 can leave some of the unused rings
4615 * "active" (i.e. head != tail) after resume which
4616 * will prevent C3 entry. Make sure all unused rings are totally idle.
4619 init_unused_rings(dev_priv);
4621 BUG_ON(!dev_priv->kernel_context);
4622 ret = i915_terminally_wedged(dev_priv);
4626 ret = i915_ppgtt_init_hw(dev_priv);
4628 DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
4632 ret = intel_wopcm_init_hw(&dev_priv->wopcm);
4634 DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
4638 /* We can't enable contexts until all firmware is loaded */
4639 ret = intel_uc_init_hw(dev_priv);
4641 DRM_ERROR("Enabling uc failed (%d)\n", ret);
4645 intel_mocs_init_l3cc_table(dev_priv);
4647 /* Only when the HW is re-initialised can we replay the requests */
4648 ret = __i915_gem_restart_engines(dev_priv);
4652 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4657 intel_uc_fini_hw(dev_priv);
4659 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4664 static int __intel_engines_record_defaults(struct drm_i915_private *i915)
4666 struct i915_gem_context *ctx;
4667 struct intel_engine_cs *engine;
4668 enum intel_engine_id id;
4672 * As we reset the GPU during very early sanitisation, the current
4673 * register state on the GPU should reflect its default values.
4674 * We load a context onto the hw (with restore-inhibit), then switch
4675 * over to a second context to save that default register state. We
4676 * can then prime every new context with that state so they all start
4677 * from the same default HW values.
4680 ctx = i915_gem_context_create_kernel(i915, 0);
4682 return PTR_ERR(ctx);
4684 for_each_engine(engine, i915, id) {
4685 struct i915_request *rq;
4687 rq = i915_request_alloc(engine, ctx);
4694 if (engine->init_context)
4695 err = engine->init_context(rq);
4697 i915_request_add(rq);
4702 /* Flush the default context image to memory, and enable powersaving. */
4703 if (!load_power_context(i915)) {
4708 for_each_engine(engine, i915, id) {
4709 struct intel_context *ce;
4710 struct i915_vma *state;
4713 ce = intel_context_lookup(ctx, engine);
4721 GEM_BUG_ON(intel_context_is_pinned(ce));
4724 * As we will hold a reference to the logical state, it will
4725 * not be torn down with the context, and importantly the
4726 * object will hold onto its vma (making it possible for a
4727 * stray GTT write to corrupt our defaults). Unmap the vma
4728 * from the GTT to prevent such accidents and reclaim the space.
4731 err = i915_vma_unbind(state);
4735 err = i915_gem_object_set_to_cpu_domain(state->obj, false);
4739 engine->default_state = i915_gem_object_get(state->obj);
4740 i915_gem_object_set_cache_coherency(engine->default_state,
4743 /* Check we can acquire the image of the context state */
4744 vaddr = i915_gem_object_pin_map(engine->default_state,
4746 if (IS_ERR(vaddr)) {
4747 err = PTR_ERR(vaddr);
4751 i915_gem_object_unpin_map(engine->default_state);
4754 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
4755 unsigned int found = intel_engines_has_context_isolation(i915);
4758 * Make sure that classes with multiple engine instances all
4759 * share the same basic configuration.
4761 for_each_engine(engine, i915, id) {
4762 unsigned int bit = BIT(engine->uabi_class);
4763 unsigned int expected = engine->default_state ? bit : 0;
4765 if ((found & bit) != expected) {
4766 DRM_ERROR("mismatching default context state for class %d on engine %s\n",
4767 engine->uabi_class, engine->name);
4773 i915_gem_context_set_closed(ctx);
4774 i915_gem_context_put(ctx);
4779 * If we have to abandon now, we expect the engines to be idle
4780 * and ready to be torn down. The quickest way we can accomplish
4781 * this is by declaring ourselves wedged.
4783 i915_gem_set_wedged(i915);
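/*
 * Hedged sketch (an assumption about the submission backend, based on the
 * comment at the top of __intel_engines_record_defaults() rather than a
 * verbatim copy of it): once engine->default_state has been recorded, a new
 * context image can be primed from it before first use, e.g.
 *
 *	defaults = i915_gem_object_pin_map(engine->default_state, I915_MAP_WB);
 *	if (!IS_ERR(defaults)) {
 *		memcpy(ctx_image, defaults, engine->context_size);	// ctx_image is hypothetical
 *		i915_gem_object_unpin_map(engine->default_state);
 *	}
 *
 * so that every new context starts from the same known register state.
 */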
4788 i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
4790 struct drm_i915_gem_object *obj;
4791 struct i915_vma *vma;
4794 obj = i915_gem_object_create_stolen(i915, size);
4796 obj = i915_gem_object_create_internal(i915, size);
4798 DRM_ERROR("Failed to allocate scratch page\n");
4799 return PTR_ERR(obj);
4802 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
4808 ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
4812 i915->gt.scratch = vma;
4816 i915_gem_object_put(obj);
4820 static void i915_gem_fini_scratch(struct drm_i915_private *i915)
4822 i915_vma_unpin_and_release(&i915->gt.scratch, 0);
4825 int i915_gem_init(struct drm_i915_private *dev_priv)
4829 /* We need to fall back to 4K pages if the host doesn't support huge GTT. */
4830 if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
4831 mkwrite_device_info(dev_priv)->page_sizes =
4832 I915_GTT_PAGE_SIZE_4K;
4834 dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
4836 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
4837 dev_priv->gt.resume = intel_lr_context_resume;
4838 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4840 dev_priv->gt.resume = intel_legacy_submission_resume;
4841 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4844 i915_timelines_init(dev_priv);
4846 ret = i915_gem_init_userptr(dev_priv);
4850 ret = intel_uc_init_misc(dev_priv);
4854 ret = intel_wopcm_init(&dev_priv->wopcm);
4858 /* This is just a security blanket to placate dragons.
4859 * On some systems, we very sporadically observe that the first TLBs
4860 * used by the CS may be stale, despite us poking the TLB reset. If
4861 * we hold the forcewake during initialisation these problems
4862 * just magically go away.
4864 mutex_lock(&dev_priv->drm.struct_mutex);
4865 intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
4867 ret = i915_gem_init_ggtt(dev_priv);
4869 GEM_BUG_ON(ret == -EIO);
4873 ret = i915_gem_init_scratch(dev_priv,
4874 IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
4876 GEM_BUG_ON(ret == -EIO);
4880 ret = i915_gem_contexts_init(dev_priv);
4882 GEM_BUG_ON(ret == -EIO);
4886 ret = intel_engines_init(dev_priv);
4888 GEM_BUG_ON(ret == -EIO);
4892 intel_init_gt_powersave(dev_priv);
4894 ret = intel_uc_init(dev_priv);
4898 ret = i915_gem_init_hw(dev_priv);
4903 * Despite its name, intel_init_clock_gating applies both display
4904 * clock gating workarounds and GT mmio workarounds, plus the occasional
4905 * GT power context workaround. Worse, sometimes it includes a context
4906 * register workaround which we need to apply before we record the
4907 * default HW state for all contexts.
4909 * FIXME: break up the workarounds and apply them at the right time!
4911 intel_init_clock_gating(dev_priv);
4913 ret = __intel_engines_record_defaults(dev_priv);
4917 if (i915_inject_load_failure()) {
4922 if (i915_inject_load_failure()) {
4927 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4928 mutex_unlock(&dev_priv->drm.struct_mutex);
4933 * Unwinding is complicated by the fact that we want to handle -EIO to mean
4934 * disable GPU submission but keep KMS alive. We want to mark the
4935 * HW as irreversibly wedged, but keep enough state around that the
4936 * driver doesn't explode during runtime.
4939 mutex_unlock(&dev_priv->drm.struct_mutex);
4941 i915_gem_suspend(dev_priv);
4942 i915_gem_suspend_late(dev_priv);
4944 i915_gem_drain_workqueue(dev_priv);
4946 mutex_lock(&dev_priv->drm.struct_mutex);
4947 intel_uc_fini_hw(dev_priv);
4949 intel_uc_fini(dev_priv);
4952 intel_cleanup_gt_powersave(dev_priv);
4953 i915_gem_cleanup_engines(dev_priv);
4957 i915_gem_contexts_fini(dev_priv);
4959 i915_gem_fini_scratch(dev_priv);
4962 intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
4963 mutex_unlock(&dev_priv->drm.struct_mutex);
4966 intel_uc_fini_misc(dev_priv);
4969 i915_gem_cleanup_userptr(dev_priv);
4970 i915_timelines_fini(dev_priv);
4974 mutex_lock(&dev_priv->drm.struct_mutex);
4977 * Allow engine initialisation to fail by marking the GPU as
4978 * wedged. But we only want to do this where the GPU is angry;
4979 * for all other failures, such as an allocation failure, bail.
4981 if (!i915_reset_failed(dev_priv)) {
4982 i915_load_error(dev_priv,
4983 "Failed to initialize GPU, declaring it wedged!\n");
4984 i915_gem_set_wedged(dev_priv);
4987 /* Minimal basic recovery for KMS */
4988 ret = i915_ggtt_enable_hw(dev_priv);
4989 i915_gem_restore_gtt_mappings(dev_priv);
4990 i915_gem_restore_fences(dev_priv);
4991 intel_init_clock_gating(dev_priv);
4993 mutex_unlock(&dev_priv->drm.struct_mutex);
4996 i915_gem_drain_freed_objects(dev_priv);
5000 void i915_gem_fini(struct drm_i915_private *dev_priv)
5002 i915_gem_suspend_late(dev_priv);
5003 intel_disable_gt_powersave(dev_priv);
5005 /* Flush any outstanding unpin_work. */
5006 i915_gem_drain_workqueue(dev_priv);
5008 mutex_lock(&dev_priv->drm.struct_mutex);
5009 intel_uc_fini_hw(dev_priv);
5010 intel_uc_fini(dev_priv);
5011 i915_gem_cleanup_engines(dev_priv);
5012 i915_gem_contexts_fini(dev_priv);
5013 i915_gem_fini_scratch(dev_priv);
5014 mutex_unlock(&dev_priv->drm.struct_mutex);
5016 intel_wa_list_free(&dev_priv->gt_wa_list);
5018 intel_cleanup_gt_powersave(dev_priv);
5020 intel_uc_fini_misc(dev_priv);
5021 i915_gem_cleanup_userptr(dev_priv);
5022 i915_timelines_fini(dev_priv);
5024 i915_gem_drain_freed_objects(dev_priv);
5026 WARN_ON(!list_empty(&dev_priv->contexts.list));
5029 void i915_gem_init_mmio(struct drm_i915_private *i915)
5031 i915_gem_sanitize(i915);
5035 i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
5037 struct intel_engine_cs *engine;
5038 enum intel_engine_id id;
5040 for_each_engine(engine, dev_priv, id)
5041 dev_priv->gt.cleanup_engine(engine);
5045 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5049 if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
5050 !IS_CHERRYVIEW(dev_priv))
5051 dev_priv->num_fence_regs = 32;
5052 else if (INTEL_GEN(dev_priv) >= 4 ||
5053 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
5054 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
5055 dev_priv->num_fence_regs = 16;
5057 dev_priv->num_fence_regs = 8;
5059 if (intel_vgpu_active(dev_priv))
5060 dev_priv->num_fence_regs =
5061 I915_READ(vgtif_reg(avail_rs.fence_num));
5063 /* Initialize fence registers to zero */
5064 for (i = 0; i < dev_priv->num_fence_regs; i++) {
5065 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
5067 fence->i915 = dev_priv;
5069 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
5071 i915_gem_restore_fences(dev_priv);
5073 i915_gem_detect_bit_6_swizzle(dev_priv);
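/*
 * Worked example of the fence register sizing above, for illustration only:
 * Haswell (gen7, neither Valleyview nor Cherryview) gets 32 fence registers,
 * 965G (gen4) or 945G gets 16, and an original 915G falls through to 8.
 * Under vGPU the count is then overridden by whatever the host exposes via
 * vgtif_reg(avail_rs.fence_num).
 */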
5076 static void i915_gem_init__mm(struct drm_i915_private *i915)
5078 spin_lock_init(&i915->mm.object_stat_lock);
5079 spin_lock_init(&i915->mm.obj_lock);
5080 spin_lock_init(&i915->mm.free_lock);
5082 init_llist_head(&i915->mm.free_list);
5084 INIT_LIST_HEAD(&i915->mm.unbound_list);
5085 INIT_LIST_HEAD(&i915->mm.bound_list);
5086 INIT_LIST_HEAD(&i915->mm.fence_list);
5087 INIT_LIST_HEAD(&i915->mm.userfault_list);
5089 INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
5092 int i915_gem_init_early(struct drm_i915_private *dev_priv)
5096 INIT_LIST_HEAD(&dev_priv->gt.active_rings);
5097 INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
5099 i915_gem_init__mm(dev_priv);
5101 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
5102 i915_gem_retire_work_handler);
5103 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
5104 i915_gem_idle_work_handler);
5105 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
5106 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5107 mutex_init(&dev_priv->gpu_error.wedge_mutex);
5108 init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
5110 atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
5112 spin_lock_init(&dev_priv->fb_tracking.lock);
5114 err = i915_gemfs_init(dev_priv);
5116 DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n", err);
5121 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
5123 i915_gem_drain_freed_objects(dev_priv);
5124 GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
5125 GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
5126 WARN_ON(dev_priv->mm.object_count);
5128 cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
5130 i915_gemfs_fini(dev_priv);
5133 int i915_gem_freeze(struct drm_i915_private *dev_priv)
5135 /* Discard all purgeable objects; let userspace recover those as
5136 * required after resuming.
5138 i915_gem_shrink_all(dev_priv);
5143 int i915_gem_freeze_late(struct drm_i915_private *i915)
5145 struct drm_i915_gem_object *obj;
5146 struct list_head *phases[] = {
5147 &i915->mm.unbound_list,
5148 &i915->mm.bound_list,
5153 * Called just before we write the hibernation image.
5155 * We need to update the domain tracking to reflect that the CPU
5156 * will be accessing all the pages to create and restore from the
5157 * hibernation, and so upon restoration those pages will be in the CPU domain.
5160 * To make sure the hibernation image contains the latest state,
5161 * we update that state just before writing out the image.
5163 * To try and reduce the hibernation image, we manually shrink
5164 * the objects as well, see i915_gem_freeze()
5167 i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
5168 i915_gem_drain_freed_objects(i915);
5170 mutex_lock(&i915->drm.struct_mutex);
5171 for (phase = phases; *phase; phase++) {
5172 list_for_each_entry(obj, *phase, mm.link)
5173 WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
5175 mutex_unlock(&i915->drm.struct_mutex);
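/*
 * Illustrative ordering sketch (an assumption about the PM glue, not a
 * verbatim call chain): during hibernation the two freeze hooks above are
 * expected to run back to back,
 *
 *	i915_gem_freeze(i915);		// drop purgeable objects early
 *	(hibernation image about to be written)
 *	i915_gem_freeze_late(i915);	// shrink again and move objects to the
 *					// CPU domain so the image is coherent
 *
 * matching the "called just before we write the hibernation image" comment.
 */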
5180 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5182 struct drm_i915_file_private *file_priv = file->driver_priv;
5183 struct i915_request *request;
5185 /* Clean up our request list when the client is going away, so that
5186 * later retire_requests won't dereference our soon-to-be-gone file_priv.
5189 spin_lock(&file_priv->mm.lock);
5190 list_for_each_entry(request, &file_priv->mm.request_list, client_link)
5191 request->file_priv = NULL;
5192 spin_unlock(&file_priv->mm.lock);
5195 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
5197 struct drm_i915_file_private *file_priv;
5202 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5206 file->driver_priv = file_priv;
5207 file_priv->dev_priv = i915;
5208 file_priv->file = file;
5210 spin_lock_init(&file_priv->mm.lock);
5211 INIT_LIST_HEAD(&file_priv->mm.request_list);
5213 file_priv->bsd_engine = -1;
5214 file_priv->hang_timestamp = jiffies;
5216 ret = i915_gem_context_open(i915, file);
5224 * i915_gem_track_fb - update frontbuffer tracking
5225 * @old: current GEM buffer for the frontbuffer slots
5226 * @new: new GEM buffer for the frontbuffer slots
5227 * @frontbuffer_bits: bitmask of frontbuffer slots
5229 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5230 * from @old and setting them in @new. Both @old and @new can be NULL.
5232 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5233 struct drm_i915_gem_object *new,
5234 unsigned frontbuffer_bits)
5236 /* Control of individual bits within the mask is guarded by
5237 * the owning plane->mutex, i.e. we can never see concurrent
5238 * manipulation of individual bits. But since the bitfield as a whole
5239 * is updated using RMW, we need to use atomics in order to update the bits.
5242 BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
5243 BITS_PER_TYPE(atomic_t));
5246 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
5247 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
5251 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
5252 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
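/*
 * Hedged usage sketch: a plane update is expected to transfer its frontbuffer
 * bit from the outgoing to the incoming framebuffer object, along the lines
 * of
 *
 *	i915_gem_track_fb(old_obj, new_obj, plane->frontbuffer_bit);
 *
 * where plane->frontbuffer_bit is an assumption about the caller, not
 * something defined in this file. Either object may be NULL, e.g. when the
 * plane is being enabled or disabled.
 */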
5256 /* Allocate a new GEM object and fill it with the supplied data */
5257 struct drm_i915_gem_object *
5258 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
5259 const void *data, size_t size)
5261 struct drm_i915_gem_object *obj;
5266 obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
5270 GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
5272 file = obj->base.filp;
5275 unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
5277 void *pgdata, *vaddr;
5279 err = pagecache_write_begin(file, file->f_mapping,
5286 memcpy(vaddr, data, len);
5289 err = pagecache_write_end(file, file->f_mapping,
5303 i915_gem_object_put(obj);
5304 return ERR_PTR(err);
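/*
 * Hedged usage sketch: this helper suits one-shot uploads such as firmware
 * blobs, e.g.
 *
 *	obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 * where fw is a hypothetical struct firmware from request_firmware(), not
 * something defined here. The size is rounded up to whole pages by the
 * helper itself.
 */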
5307 struct scatterlist *
5308 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
5310 unsigned int *offset)
5312 struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
5313 struct scatterlist *sg;
5314 unsigned int idx, count;
5317 GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
5318 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
5320 /* As we iterate forward through the sg, we record each entry in a
5321 * radix tree for quick repeated (backwards) lookups. If we have seen
5322 * this index previously, we will have an entry for it.
5324 * Initial lookup is O(N), but this is amortized to O(1) for
5325 * sequential page access (where each new request is consecutive
5326 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
5327 * i.e. O(1) with a large constant!
5329 if (n < READ_ONCE(iter->sg_idx))
5332 mutex_lock(&iter->lock);
5334 /* We prefer to reuse the last sg so that repeated lookups of this
5335 * (or the subsequent) sg are fast - comparing against the last
5336 * sg is faster than going through the radix tree.
5341 count = __sg_page_count(sg);
5343 while (idx + count <= n) {
5348 /* If we cannot allocate and insert this entry, or the
5349 * individual pages from this range, cancel updating the
5350 * sg_idx so that on this lookup we are forced to linearly
5351 * scan onwards, but on future lookups we will try the
5352 * insertion again (in which case we need to be careful of
5353 * the error return reporting that we have already inserted this index).
5356 ret = radix_tree_insert(&iter->radix, idx, sg);
5357 if (ret && ret != -EEXIST)
5360 entry = xa_mk_value(idx);
5361 for (i = 1; i < count; i++) {
5362 ret = radix_tree_insert(&iter->radix, idx + i, entry);
5363 if (ret && ret != -EEXIST)
5368 sg = ____sg_next(sg);
5369 count = __sg_page_count(sg);
5376 mutex_unlock(&iter->lock);
5378 if (unlikely(n < idx)) /* insertion completed by another thread */
5381 /* In case we failed to insert the entry into the radix tree, we need
5382 * to look beyond the current sg.
5384 while (idx + count <= n) {
5386 sg = ____sg_next(sg);
5387 count = __sg_page_count(sg);
5396 sg = radix_tree_lookup(&iter->radix, n);
5399 /* If this index is in the middle of a multi-page sg entry,
5400 * the radix tree will contain a value entry that points
5401 * to the start of that range. We will return the pointer to
5402 * the base page and the offset of this page within the sg entry.
5406 if (unlikely(xa_is_value(sg))) {
5407 unsigned long base = xa_to_value(sg);
5409 sg = radix_tree_lookup(&iter->radix, base);
5421 i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
5423 struct scatterlist *sg;
5424 unsigned int offset;
5426 GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
5428 sg = i915_gem_object_get_sg(obj, n, &offset);
5429 return nth_page(sg_page(sg), offset);
5432 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
5434 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
5439 page = i915_gem_object_get_page(obj, n);
5441 set_page_dirty(page);
5447 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
5450 struct scatterlist *sg;
5451 unsigned int offset;
5453 sg = i915_gem_object_get_sg(obj, n, &offset);
5454 return sg_dma_address(sg) + (offset << PAGE_SHIFT);
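/*
 * Hedged usage sketch: with the object's pages pinned, the helpers above can
 * walk the backing store page by page, e.g.
 *
 *	for (n = 0; n < obj->base.size >> PAGE_SHIFT; n++) {
 *		struct page *page = i915_gem_object_get_page(obj, n);
 *		dma_addr_t addr = i915_gem_object_get_dma_address(obj, n);
 *		...
 *	}
 *
 * Thanks to the radix tree cache built up in i915_gem_object_get_sg(), such
 * a forward walk costs amortised O(1) per page rather than O(N) per lookup.
 */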
5457 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
5459 struct sg_table *pages;
5462 if (align > obj->base.size)
5465 if (obj->ops == &i915_gem_phys_ops)
5468 if (obj->ops != &i915_gem_object_ops)
5471 err = i915_gem_object_unbind(obj);
5475 mutex_lock(&obj->mm.lock);
5477 if (obj->mm.madv != I915_MADV_WILLNEED) {
5482 if (obj->mm.quirked) {
5487 if (obj->mm.mapping) {
5492 pages = __i915_gem_object_unset_pages(obj);
5494 obj->ops = &i915_gem_phys_ops;
5496 err = ____i915_gem_object_get_pages(obj);
5500 /* Perma-pin (until release) the physical set of pages */
5501 __i915_gem_object_pin_pages(obj);
5503 if (!IS_ERR_OR_NULL(pages))
5504 i915_gem_object_ops.put_pages(obj, pages);
5505 mutex_unlock(&obj->mm.lock);
5509 obj->ops = &i915_gem_object_ops;
5510 if (!IS_ERR_OR_NULL(pages)) {
5511 unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);
5513 __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
5516 mutex_unlock(&obj->mm.lock);
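/*
 * Hedged usage note: switching an object to i915_gem_phys_ops is only
 * worthwhile when the backing store must be physically contiguous (legacy
 * cursor planes are the classic example, though that is an assumption about
 * the callers rather than something enforced here):
 *
 *	err = i915_gem_object_attach_phys(obj, align);
 *
 * The object is unbound by the helper, but it must be WILLNEED, unquirked
 * and not CPU-mapped for the switch to succeed, as checked above.
 */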
5520 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
5521 #include "selftests/scatterlist.c"
5522 #include "selftests/mock_gem_device.c"
5523 #include "selftests/huge_gem_object.c"
5524 #include "selftests/huge_pages.c"
5525 #include "selftests/i915_gem_object.c"
5526 #include "selftests/i915_gem_coherency.c"
5527 #include "selftests/i915_gem.c"