2 * SPDX-License-Identifier: MIT
4 * Copyright © 2014-2016 Intel Corporation
7 #include "display/intel_frontbuffer.h"
10 #include "i915_gem_clflush.h"
11 #include "i915_gem_gtt.h"
12 #include "i915_gem_ioctls.h"
13 #include "i915_gem_object.h"
16 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
19 * We manually flush the CPU domain so that we can override and
20 * force the flush for the display, and perform it asynchronously.
22 i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
24 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
25 obj->write_domain = 0;
28 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
30 if (!READ_ONCE(obj->pin_global))
33 i915_gem_object_lock(obj);
34 __i915_gem_object_flush_for_display(obj);
35 i915_gem_object_unlock(obj);
39 * Moves a single object to the WC read, and possibly write domain.
40 * @obj: object to act on
41 * @write: ask for write access or read only
43 * This function returns when the move is complete, including waiting on rendering.
47 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
51 assert_object_held(obj);
53 ret = i915_gem_object_wait(obj,
54 I915_WAIT_INTERRUPTIBLE |
55 (write ? I915_WAIT_ALL : 0),
56 MAX_SCHEDULE_TIMEOUT);
60 if (obj->write_domain == I915_GEM_DOMAIN_WC)
63 /* Flush and acquire obj->pages so that we are coherent through
64 * direct access in memory with previous cached writes through
65 * shmemfs and that our cache domain tracking remains valid.
66 * For example, if the obj->filp was moved to swap without us
67 * being notified and releasing the pages, we would mistakenly
68 * continue to assume that the obj remained out of the CPU cached domain.
71 ret = i915_gem_object_pin_pages(obj);
75 i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
77 /* Serialise direct access to this object with the barriers for
78 * coherent writes from the GPU, by effectively invalidating the
79 * WC domain upon first access.
81 if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
84 /* It should now be out of any other write domains, and we can update
85 * the domain values for our changes.
87 GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
88 obj->read_domains |= I915_GEM_DOMAIN_WC;
90 obj->read_domains = I915_GEM_DOMAIN_WC;
91 obj->write_domain = I915_GEM_DOMAIN_WC;
95 i915_gem_object_unpin_pages(obj);
100 * Moves a single object to the GTT read, and possibly write domain.
101 * @obj: object to act on
102 * @write: ask for write access or read only
104 * This function returns when the move is complete, including waiting on rendering.
108 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
112 assert_object_held(obj);
114 ret = i915_gem_object_wait(obj,
115 I915_WAIT_INTERRUPTIBLE |
116 (write ? I915_WAIT_ALL : 0),
117 MAX_SCHEDULE_TIMEOUT);
121 if (obj->write_domain == I915_GEM_DOMAIN_GTT)
124 /* Flush and acquire obj->pages so that we are coherent through
125 * direct access in memory with previous cached writes through
126 * shmemfs and that our cache domain tracking remains valid.
127 * For example, if the obj->filp was moved to swap without us
128 * being notified and releasing the pages, we would mistakenly
129 * continue to assume that the obj remained out of the CPU cached domain.
132 ret = i915_gem_object_pin_pages(obj);
136 i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
138 /* Serialise direct access to this object with the barriers for
139 * coherent writes from the GPU, by effectively invalidating the
140 * GTT domain upon first access.
142 if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
145 /* It should now be out of any other write domains, and we can update
146 * the domain values for our changes.
148 GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
149 obj->read_domains |= I915_GEM_DOMAIN_GTT;
151 obj->read_domains = I915_GEM_DOMAIN_GTT;
152 obj->write_domain = I915_GEM_DOMAIN_GTT;
153 obj->mm.dirty = true;
156 i915_gem_object_unpin_pages(obj);
161 * Changes the cache-level of an object across all VMA.
162 * @obj: object to act on
163 * @cache_level: new cache level to set for the object
165 * After this function returns, the object will be in the new cache-level
166 * across all GTT and the contents of the backing storage will be coherent,
167 * with respect to the new cache-level. In order to keep the backing storage
168 * coherent for all users, we only allow a single cache level to be set
169 * globally on the object and prevent it from being changed whilst the
170 * hardware is reading from the object. That is if the object is currently
171 * on the scanout it will be set to uncached (or equivalent display
172 * cache coherency) and all non-MOCS GPU access will also be uncached so
173 * that all direct access to the scanout remains coherent.
175 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
176 enum i915_cache_level cache_level)
178 struct i915_vma *vma;
181 assert_object_held(obj);
183 if (obj->cache_level == cache_level)
186 /* Inspect the list of currently bound VMA and unbind any that would
187 * be invalid given the new cache-level. This is principally to
188 * catch the issue of the CS prefetch crossing page boundaries and
189 * reading an invalid PTE on older architectures.
192 list_for_each_entry(vma, &obj->vma.list, obj_link) {
193 if (!drm_mm_node_allocated(&vma->node))
196 if (i915_vma_is_pinned(vma)) {
197 DRM_DEBUG("can not change the cache level of pinned objects\n");
201 if (!i915_vma_is_closed(vma) &&
202 i915_gem_valid_gtt_space(vma, cache_level))
205 ret = i915_vma_unbind(vma);
209 /* As unbinding may affect other elements in the
210 * obj->vma_list (due to side-effects from retiring
211 * an active vma), play safe and restart the iterator.
216 /* We can reuse the existing drm_mm nodes but need to change the
217 * cache-level on the PTE. We could simply unbind them all and
218 * rebind with the correct cache-level on next use. However since
219 * we already have a valid slot, dma mapping, pages etc, we may as well
220 * rewrite the PTE in the belief that doing so tramples upon less
221 * state and so involves less work.
223 if (atomic_read(&obj->bind_count)) {
224 struct drm_i915_private *i915 = to_i915(obj->base.dev);
226 /* Before we change the PTE, the GPU must not be accessing it.
227 * If we wait upon the object, we know that all the bound
228 * VMA are no longer active.
230 ret = i915_gem_object_wait(obj,
231 I915_WAIT_INTERRUPTIBLE |
233 MAX_SCHEDULE_TIMEOUT);
237 if (!HAS_LLC(i915) && cache_level != I915_CACHE_NONE) {
238 intel_wakeref_t wakeref =
239 intel_runtime_pm_get(&i915->runtime_pm);
242 * Access to snoopable pages through the GTT is
243 * incoherent and on some machines causes a hard
244 * lockup. Relinquish the CPU mmapping to force
245 * userspace to refault in the pages and we can
246 * then double check if the GTT mapping is still
247 * valid for that pointer access.
249 ret = mutex_lock_interruptible(&i915->ggtt.vm.mutex);
251 intel_runtime_pm_put(&i915->runtime_pm,
256 if (obj->userfault_count)
257 __i915_gem_object_release_mmap(obj);
260 * As we no longer need a fence for GTT access,
261 * we can relinquish it now (and so prevent having
262 * to steal a fence from someone else on the next
263 * fence request). Note GPU activity would have
264 * dropped the fence as all snoopable access is
265 * supposed to be linear.
267 for_each_ggtt_vma(vma, obj) {
268 ret = i915_vma_revoke_fence(vma);
272 mutex_unlock(&i915->ggtt.vm.mutex);
273 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
278 * We either have incoherent backing store and
279 * so no GTT access or the architecture is fully
280 * coherent. In such cases, existing GTT mmaps
281 * ignore the cache bit in the PTE and we can
282 * rewrite it without confusing the GPU or having
283 * to force userspace to fault back in its mmaps.
287 list_for_each_entry(vma, &obj->vma.list, obj_link) {
288 if (!drm_mm_node_allocated(&vma->node))
291 ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
297 list_for_each_entry(vma, &obj->vma.list, obj_link)
298 vma->node.color = cache_level;
299 i915_gem_object_set_cache_coherency(obj, cache_level);
300 obj->cache_dirty = true; /* Always invalidate stale cachelines */
305 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
306 struct drm_file *file)
308 struct drm_i915_gem_caching *args = data;
309 struct drm_i915_gem_object *obj;
313 obj = i915_gem_object_lookup_rcu(file, args->handle);
319 switch (obj->cache_level) {
321 case I915_CACHE_L3_LLC:
322 args->caching = I915_CACHING_CACHED;
326 args->caching = I915_CACHING_DISPLAY;
330 args->caching = I915_CACHING_NONE;
338 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
339 struct drm_file *file)
341 struct drm_i915_private *i915 = to_i915(dev);
342 struct drm_i915_gem_caching *args = data;
343 struct drm_i915_gem_object *obj;
344 enum i915_cache_level level;
347 switch (args->caching) {
348 case I915_CACHING_NONE:
349 level = I915_CACHE_NONE;
351 case I915_CACHING_CACHED:
353 * Due to a HW issue on BXT A stepping, GPU stores via a
354 * snooped mapping may leave stale data in a corresponding CPU
355 * cacheline, whereas normally such cachelines would get invalidated.
358 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
361 level = I915_CACHE_LLC;
363 case I915_CACHING_DISPLAY:
364 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
370 obj = i915_gem_object_lookup(file, args->handle);
375 * The caching mode of proxy object is handled by its generator, and
376 * not allowed to be changed by userspace.
378 if (i915_gem_object_is_proxy(obj)) {
383 if (obj->cache_level == level)
386 ret = i915_gem_object_wait(obj,
387 I915_WAIT_INTERRUPTIBLE,
388 MAX_SCHEDULE_TIMEOUT);
392 ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
396 ret = i915_gem_object_lock_interruptible(obj);
398 ret = i915_gem_object_set_cache_level(obj, level);
399 i915_gem_object_unlock(obj);
401 mutex_unlock(&i915->drm.struct_mutex);
404 i915_gem_object_put(obj);
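/*
 * Illustrative sketch, not part of this file: from userspace the caching
 * mode is selected with the SET_CACHING ioctl from the i915 uapi
 * (include/uapi/drm/i915_drm.h), using libdrm's drmIoctl() wrapper.
 * Here fd and handle are assumed to be a valid device fd and GEM handle:
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	err = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *
 * The request is rejected above when the platform has neither LLC nor
 * snooping (see the HAS_SNOOP() check), and proxy objects cannot have
 * their caching mode changed by userspace at all.
 */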
409 * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
410 * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
411 * (for pageflips). We only flush the caches while preparing the buffer for
412 * display, the callers are responsible for frontbuffer flush.
415 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
417 const struct i915_ggtt_view *view,
420 struct i915_vma *vma;
423 assert_object_held(obj);
425 /* Mark the global pin early so that we account for the
426 * display coherency whilst setting up the cache domains.
430 /* The display engine is not coherent with the LLC cache on gen6. As
431 * a result, we make sure that the pinning that is about to occur is
432 * done with uncached PTEs. This is the lowest common denominator for all chipsets.
435 * However for gen6+, we could do better by using the GFDT bit instead
436 * of uncaching, which would allow us to flush all the LLC-cached data
437 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
439 ret = i915_gem_object_set_cache_level(obj,
440 HAS_WT(to_i915(obj->base.dev)) ?
441 I915_CACHE_WT : I915_CACHE_NONE);
444 goto err_unpin_global;
447 /* As the user may map the buffer once pinned in the display plane
448 * (e.g. libkms for the bootup splash), we have to ensure that we
449 * always use map_and_fenceable for all scanout buffers. However,
450 * it may simply be too big to fit into mappable, in which case
451 * put it anyway and hope that userspace can cope (but always first
452 * try to preserve the existing ABI).
454 vma = ERR_PTR(-ENOSPC);
455 if ((flags & PIN_MAPPABLE) == 0 &&
456 (!view || view->type == I915_GGTT_VIEW_NORMAL))
457 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
462 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
464 goto err_unpin_global;
466 vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
468 __i915_gem_object_flush_for_display(obj);
470 /* It should now be out of any other write domains, and we can update
471 * the domain values for our changes.
473 obj->read_domains |= I915_GEM_DOMAIN_GTT;
482 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
484 struct drm_i915_private *i915 = to_i915(obj->base.dev);
485 struct i915_vma *vma;
487 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
489 mutex_lock(&i915->ggtt.vm.mutex);
490 for_each_ggtt_vma(vma, obj) {
491 if (!drm_mm_node_allocated(&vma->node))
494 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
496 mutex_unlock(&i915->ggtt.vm.mutex);
498 if (i915_gem_object_is_shrinkable(obj)) {
501 spin_lock_irqsave(&i915->mm.obj_lock, flags);
503 if (obj->mm.madv == I915_MADV_WILLNEED)
504 list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
506 spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
511 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
513 struct drm_i915_gem_object *obj = vma->obj;
515 assert_object_held(obj);
517 if (WARN_ON(obj->pin_global == 0))
520 if (--obj->pin_global == 0)
521 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
523 /* Bump the LRU to try and avoid premature eviction whilst flipping */
524 i915_gem_object_bump_inactive_ggtt(obj);
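/*
 * Illustrative sketch, not part of this file: the display code is expected
 * to use the pin/unpin pair around scanout while holding the object lock,
 * roughly as follows (alignment, view and flags are assumed to come from
 * the framebuffer being pinned):
 *
 *	vma = i915_gem_object_pin_to_display_plane(obj, alignment, view, flags);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	...scanout from vma...
 *	i915_gem_object_unpin_from_display_plane(vma);
 */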
530 * Moves a single object to the CPU read, and possibly write domain.
531 * @obj: object to act on
532 * @write: requesting write or read-only access
534 * This function returns when the move is complete, including waiting on rendering.
538 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
542 assert_object_held(obj);
544 ret = i915_gem_object_wait(obj,
545 I915_WAIT_INTERRUPTIBLE |
546 (write ? I915_WAIT_ALL : 0),
547 MAX_SCHEDULE_TIMEOUT);
551 i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
553 /* Flush the CPU cache if it's still invalid. */
554 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
555 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
556 obj->read_domains |= I915_GEM_DOMAIN_CPU;
559 /* It should now be out of any other write domains, and we can update
560 * the domain values for our changes.
562 GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);
564 /* If we're writing through the CPU, then the GPU read domains will
565 * need to be invalidated at next use.
568 __start_cpu_write(obj);
574 * Called when user space prepares to use an object with the CPU, either
575 * through the mmap ioctl's mapping or a GTT mapping.
577 * @data: ioctl data blob
581 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
582 struct drm_file *file)
584 struct drm_i915_gem_set_domain *args = data;
585 struct drm_i915_gem_object *obj;
586 u32 read_domains = args->read_domains;
587 u32 write_domain = args->write_domain;
590 /* Only handle setting domains to types used by the CPU. */
591 if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
595 * Having something in the write domain implies it's in the read
596 * domain, and only that read domain. Enforce that in the request.
598 if (write_domain && read_domains != write_domain)
604 obj = i915_gem_object_lookup(file, args->handle);
609 * Already in the desired write domain? Nothing for us to do!
611 * We apply a little bit of cunning here to catch a broader set of
612 * no-ops. If obj->write_domain is set, we must be in the same
613 * obj->read_domains, and only that domain. Therefore, if that
614 * obj->write_domain matches the request read_domains, we are
615 * already in the same read/write domain and can skip the operation,
616 * without having to further check the requested write_domain.
618 if (READ_ONCE(obj->write_domain) == read_domains) {
624 * Try to flush the object off the GPU without holding the lock.
625 * We will repeat the flush holding the lock in the normal manner
626 * to catch cases where we are gazumped.
628 err = i915_gem_object_wait(obj,
629 I915_WAIT_INTERRUPTIBLE |
631 (write_domain ? I915_WAIT_ALL : 0),
632 MAX_SCHEDULE_TIMEOUT);
637 * Proxy objects do not control access to the backing storage, ergo
638 * they cannot be used as a means to manipulate the cache domain
639 * tracking for that backing storage. The proxy object is always
640 * considered to be outside of any cache domain.
642 if (i915_gem_object_is_proxy(obj)) {
648 * Flush and acquire obj->pages so that we are coherent through
649 * direct access in memory with previous cached writes through
650 * shmemfs and that our cache domain tracking remains valid.
651 * For example, if the obj->filp was moved to swap without us
652 * being notified and releasing the pages, we would mistakenly
653 * continue to assume that the obj remained out of the CPU cached domain.
656 err = i915_gem_object_pin_pages(obj);
660 err = i915_gem_object_lock_interruptible(obj);
664 if (read_domains & I915_GEM_DOMAIN_WC)
665 err = i915_gem_object_set_to_wc_domain(obj, write_domain);
666 else if (read_domains & I915_GEM_DOMAIN_GTT)
667 err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
669 err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
671 /* And bump the LRU for this access */
672 i915_gem_object_bump_inactive_ggtt(obj);
674 i915_gem_object_unlock(obj);
677 intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
680 i915_gem_object_unpin_pages(obj);
682 i915_gem_object_put(obj);
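/*
 * Illustrative sketch, not part of this file: before touching a GEM object
 * through a CPU mmap, userspace typically moves it into the CPU domain with
 * the SET_DOMAIN ioctl (fd and handle assumed valid, drmIoctl() from libdrm):
 *
 *	struct drm_i915_gem_set_domain arg = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *
 *	err = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
 *
 * As enforced above, a non-zero write_domain must equal read_domains, and
 * the GPU-only domains are rejected.
 */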
687 * Pins the specified object's pages and synchronizes the object with
688 * GPU accesses. Sets needs_clflush to non-zero if the caller should
689 * flush the object from the CPU cache.
691 int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
692 unsigned int *needs_clflush)
697 if (!i915_gem_object_has_struct_page(obj))
700 ret = i915_gem_object_lock_interruptible(obj);
704 ret = i915_gem_object_wait(obj,
705 I915_WAIT_INTERRUPTIBLE,
706 MAX_SCHEDULE_TIMEOUT);
710 ret = i915_gem_object_pin_pages(obj);
714 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
715 !static_cpu_has(X86_FEATURE_CLFLUSH)) {
716 ret = i915_gem_object_set_to_cpu_domain(obj, false);
723 i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
725 /* If we're not in the cpu read domain, set ourself into the gtt
726 * read domain and manually flush cachelines (if required). This
727 * optimizes for the case when the gpu will dirty the data
728 * anyway again before the next pread happens.
730 if (!obj->cache_dirty &&
731 !(obj->read_domains & I915_GEM_DOMAIN_CPU))
732 *needs_clflush = CLFLUSH_BEFORE;
735 /* return with the pages pinned */
739 i915_gem_object_unpin_pages(obj);
741 i915_gem_object_unlock(obj);
745 int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
746 unsigned int *needs_clflush)
751 if (!i915_gem_object_has_struct_page(obj))
754 ret = i915_gem_object_lock_interruptible(obj);
758 ret = i915_gem_object_wait(obj,
759 I915_WAIT_INTERRUPTIBLE |
761 MAX_SCHEDULE_TIMEOUT);
765 ret = i915_gem_object_pin_pages(obj);
769 if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
770 !static_cpu_has(X86_FEATURE_CLFLUSH)) {
771 ret = i915_gem_object_set_to_cpu_domain(obj, true);
778 i915_gem_object_flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
780 /* If we're not in the cpu write domain, set ourself into the
781 * gtt write domain and manually flush cachelines (as required).
782 * This optimizes for the case when the gpu will use the data
783 * right away and we therefore have to clflush anyway.
785 if (!obj->cache_dirty) {
786 *needs_clflush |= CLFLUSH_AFTER;
789 * Same trick applies to invalidate partially written
790 * cachelines read before writing.
792 if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
793 *needs_clflush |= CLFLUSH_BEFORE;
797 intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
798 obj->mm.dirty = true;
799 /* return with the pages pinned */
803 i915_gem_object_unpin_pages(obj);
805 i915_gem_object_unlock(obj);
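/*
 * Illustrative sketch, not part of this file: callers such as the shmem
 * pread/pwrite and relocation paths are expected to bracket their CPU
 * access roughly as follows, with i915_gem_object_finish_access() assumed
 * to be the counterpart that releases what the prepare helper acquired:
 *
 *	err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *	if (err)
 *		return err;
 *	...write through the CPU, honouring needs_clflush...
 *	i915_gem_object_finish_access(obj);
 */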