2 * SPDX-License-Identifier: MIT
4 * Copyright © 2008,2010 Intel Corporation
7 #include <linux/intel-iommu.h>
8 #include <linux/dma-resv.h>
9 #include <linux/sync_file.h>
10 #include <linux/uaccess.h>
12 #include <drm/drm_syncobj.h>
14 #include "display/intel_frontbuffer.h"
16 #include "gem/i915_gem_ioctls.h"
17 #include "gt/intel_context.h"
18 #include "gt/intel_engine_pool.h"
19 #include "gt/intel_gt.h"
20 #include "gt/intel_gt_pm.h"
21 #include "gt/intel_ring.h"
24 #include "i915_gem_clflush.h"
25 #include "i915_gem_context.h"
26 #include "i915_gem_ioctls.h"
27 #include "i915_sw_fence_work.h"
28 #include "i915_trace.h"
34 /** This vma's place in the execbuf reservation list */
35 struct drm_i915_gem_exec_object2 *exec;
36 struct list_head bind_link;
37 struct list_head reloc_link;
39 struct hlist_node node;
47 #define DBG_FORCE_RELOC 0 /* choose one of the above! */
50 #define __EXEC_OBJECT_HAS_PIN BIT(31)
51 #define __EXEC_OBJECT_HAS_FENCE BIT(30)
52 #define __EXEC_OBJECT_NEEDS_MAP BIT(29)
53 #define __EXEC_OBJECT_NEEDS_BIAS BIT(28)
54 #define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 28) /* all of the above */
55 #define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
57 #define __EXEC_HAS_RELOC BIT(31)
58 #define __EXEC_INTERNAL_FLAGS (~0u << 31)
59 #define UPDATE PIN_OFFSET_FIXED
61 #define BATCH_OFFSET_BIAS (256*1024)
63 #define __I915_EXEC_ILLEGAL_FLAGS \
64 (__I915_EXEC_UNKNOWN_FLAGS | \
65 I915_EXEC_CONSTANTS_MASK | \
66 I915_EXEC_RESOURCE_STREAMER)
68 /* Catch emission of unexpected errors for CI! */
69 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
72 DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
78 * DOC: User command execution
80 * Userspace submits commands to be executed on the GPU as an instruction
81 * stream within a GEM object we call a batchbuffer. These instructions may
82 * refer to other GEM objects containing auxiliary state such as kernels,
83 * samplers, render targets and even secondary batchbuffers. Userspace does
84 * not know where in the GPU memory these objects reside and so before the
85 * batchbuffer is passed to the GPU for execution, those addresses in the
86 * batchbuffer and auxiliary objects are updated. This is known as relocation,
87 * or patching. To try and avoid having to relocate each object on the next
88 * execution, userspace is told the location of those objects in this pass,
89 * but this remains just a hint as the kernel may choose a new location for
90 * any object in the future.
92 * At the level of talking to the hardware, submitting a batchbuffer for the
93 * GPU to execute amounts to adding content to a buffer from which the HW
94 * command streamer is reading (a rough sketch follows the steps below).
96 * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
97 * Execlists, this command is not placed on the same buffer as the
100 * 2. Add a command to invalidate caches to the buffer.
102 * 3. Add a batchbuffer start command to the buffer; the start command is
103 * essentially a token together with the GPU address of the batchbuffer
106 * 4. Add a pipeline flush to the buffer.
108 * 5. Add a memory write command to the buffer to record when the GPU
109 * is done executing the batchbuffer. The memory write writes the
110 * global sequence number of the request, ``i915_request::global_seqno``;
111 * the i915 driver uses the current value at that location to determine
112 * if the GPU has completed the batchbuffer.
114 * 6. Add a user interrupt command to the buffer. This command instructs
115 * the GPU to issue an interrupt when the command, pipeline flush and
116 * memory write are completed.
118 * 7. Inform the hardware of the additional commands added to the buffer
119 * (by updating the tail pointer).
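 *
 * As a hedged sketch only (simplified and gen-agnostic; the real emission is
 * spread across the engine-specific request helpers, batch_address and
 * seqno_address are illustrative placeholders, and error handling is
 * elided), steps 2-7 amount to something like:
 *
 * .. code-block:: c
 *
 *	u32 *cs = intel_ring_begin(rq, 8);		// reserve ring space
 *
 *	*cs++ = MI_FLUSH;				// 2: invalidate caches
 *	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965;
 *	*cs++ = batch_address;				// 3: jump into the batch
 *	*cs++ = MI_FLUSH;				// 4: pipeline flush
 *	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
 *	*cs++ = seqno_address;				// 5: record completion
 *	*cs++ = lower_32_bits(rq->fence.seqno);
 *	*cs++ = MI_USER_INTERRUPT;			// 6: raise an interrupt
 *	intel_ring_advance(rq, cs);			// 7: publish the new tail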
121 * Processing an execbuf ioctl is conceptually split up into a few phases (a
121 * skeleton of the flow follows the list).
123 * 1. Validation - Ensure all the pointers, handles and flags are valid.
124 * 2. Reservation - Assign GPU address space for every object
125 * 3. Relocation - Update any addresses to point to the final locations
126 * 4. Serialisation - Order the request with respect to its dependencies
127 * 5. Construction - Construct a request to execute the batchbuffer
128 * 6. Submission (at some point in the future execution)
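 *
 * A rough skeleton of how those phases map onto the functions defined below
 * (error handling, locking and the slow paths elided):
 *
 * .. code-block:: c
 *
 *	err = eb_relocate(eb);		// phases 1-3: lookup, reserve, relocate
 *	err = eb_parse(eb);		// optional cmdparser pass over the batch
 *	err = eb_submit(eb, batch);	// phases 4-5, via eb_move_to_gpu()
 *	eb_request_add(eb);		// phase 6: hand the request over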
130 * Reserving resources for the execbuf is the most complicated phase. We
131 * neither want to have to migrate the object in the address space, nor do
132 * we want to have to update any relocations pointing to this object. Ideally,
133 * we want to leave the object where it is and for all the existing relocations
134 * to match. If the object is given a new address, or if userspace thinks the
135 * object is elsewhere, we have to parse all the relocation entries and update
136 * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
137 * all the target addresses in all of its objects match the value in the
138 * relocation entries and that they all match the presumed offsets given by the
139 * list of execbuffer objects. Using this knowledge, we know that if we haven't
140 * moved any buffers, all the relocation entries are valid and we can skip
141 * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
142 * hang.) The requirements for using I915_EXEC_NO_RELOC are (a userspace
142 * sketch follows the list):
144 * The addresses written in the objects must match the corresponding
145 * reloc.presumed_offset which in turn must match the corresponding
148 * Any render targets written to in the batch must be flagged with
151 * To avoid stalling, execobject.offset should match the current
152 * address of that object within the active context.
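 *
 * A hedged userspace-side sketch of honouring those requirements (types and
 * flags from uapi/drm/i915_drm.h, drmIoctl() from libdrm; bo_handle, relocs
 * and presumed_offset are illustrative, error handling elided):
 *
 * .. code-block:: c
 *
 *	struct drm_i915_gem_exec_object2 obj = {
 *		.handle = bo_handle,
 *		.offset = presumed_offset,	// last address the kernel reported
 *		.relocs_ptr = (uintptr_t)relocs,
 *		.relocation_count = nrelocs,	// each reloc.presumed_offset matches
 *	};
 *	struct drm_i915_gem_execbuffer2 execbuf = {
 *		.buffers_ptr = (uintptr_t)&obj,
 *		.buffer_count = 1,
 *		.flags = I915_EXEC_NO_RELOC | I915_EXEC_HANDLE_LUT,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);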
154 * The reservation is done in multiple phases. First we try to keep any
155 * object already bound in its current location - so long as it meets the
156 * constraints imposed by the new execbuffer. Any object left unbound after the
157 * first pass is then fitted into any available idle space. If an object does
158 * not fit, all objects are removed from the reservation and the process rerun
159 * after sorting the objects into a priority order (more difficult to fit
160 * objects are tried first). Failing that, the entire VM is cleared and we try
161 * to fit the execbuf one last time before concluding that it simply will not
164 * A small complication to all of this is that we allow userspace not only to
165 * specify an alignment and a size for the object in the address space, but
166 * we also allow userspace to specify the exact offset. These objects are
167 * simpler to place (the location is known a priori); all we have to do is make
168 * sure the space is available.
170 * Once all the objects are in place, patching up the buried pointers to point
171 * to the final locations is a fairly simple job of walking over the relocation
172 * entry arrays, looking up the right address and rewriting the value into
173 * the object. Simple! ... The relocation entries are stored in user memory
174 * and so to access them we have to copy them into a local buffer. That copy
175 * has to avoid taking any pagefaults as they may lead back to a GEM object
176 * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
177 * the relocation into multiple passes. First we try to do everything within an
178 * atomic context (avoid the pagefaults) which requires that we never wait. If
179 * we detect that we may wait, or if we need to fault, then we have to fallback
180 * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
181 * bells yet?) Dropping the mutex means that we lose all the state we have
182 * built up so far for the execbuf and we must reset any global data. However,
183 * we do leave the objects pinned in their final locations - which is a
184 * potential issue for concurrent execbufs. Once we have left the mutex, we can
185 * allocate and copy all the relocation entries into a large array at our
186 * leisure, reacquire the mutex, reclaim all the objects and other state and
187 * then proceed to update any incorrect addresses within the objects.
189 * As we process the relocation entries, we maintain a record of whether the
190 * object is being written to. Using NO_RELOC, we expect userspace to provide
191 * this information instead. We also check whether we can skip the relocation
192 * by comparing the expected value inside the relocation entry with the target's
193 * final address. If they differ, we have to map the current object and rewrite
194 * the 4 or 8 byte pointer within.
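 *
 * A minimal sketch of that CPU-side rewrite, assuming the page containing
 * the relocation is already mapped at vaddr (reloc, target_vma and wide are
 * illustrative; the real path is relocate_entry() below):
 *
 * .. code-block:: c
 *
 *	u64 target = gen8_canonical_addr(reloc->delta + target_vma->node.start);
 *
 *	*(u32 *)(vaddr + offset_in_page(reloc->offset)) = lower_32_bits(target);
 *	if (wide)	// 8 byte pointers on 64b platforms
 *		*(u32 *)(vaddr + offset_in_page(reloc->offset + 4)) =
 *			upper_32_bits(target);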
196 * Serialising an execbuf is quite simple according to the rules of the GEM
197 * ABI. Execution within each context is ordered by the order of submission.
198 * Writes to any GEM object are in order of submission and are exclusive. Reads
199 * from a GEM object are unordered with respect to other reads, but ordered by
200 * writes. A write submitted after a read cannot occur before the read, and
201 * similarly any read submitted after a write cannot occur before the write.
202 * Writes are ordered between engines such that only one write occurs at any
203 * time (completing any reads beforehand) - using semaphores where available
204 * and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
205 * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
206 * reads before starting, and any read (either using set-domain or pread) must
207 * flush all GPU writes before starting. (Note we only employ a barrier before,
208 * we currently rely on userspace not concurrently starting a new execution
209 * whilst reading or writing to an object. This may be an advantage or not
210 * depending on how much you trust userspace not to shoot themselves in the
211 * foot.) Serialisation may just result in the request being inserted into
212 * a DAG awaiting its turn, but the simplest approach is to wait on the CPU
213 * until all dependencies are resolved.
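 *
 * In code, that per-object ordering is requested while building the request,
 * simplified here from eb_move_to_gpu() below:
 *
 * .. code-block:: c
 *
 *	err = i915_request_await_object(rq, obj, flags & EXEC_OBJECT_WRITE);
 *	if (err == 0)
 *		err = i915_vma_move_to_active(vma, rq, flags);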
215 * After all of that, it is just a matter of closing the request and handing it to
216 * the hardware (well, leaving it in a queue to be executed). However, we also
217 * offer the ability for batchbuffers to be run with elevated privileges so
218 * that they can access otherwise hidden registers. (Used to adjust L3 cache etc.)
219 * Before any batch is given extra privileges we first must check that it
220 * contains no nefarious instructions: we check that each instruction is from
221 * our whitelist and all registers are also from an allowed list. We first
222 * copy the user's batchbuffer to a shadow (so that the user doesn't have
223 * access to it, either by the CPU or GPU as we scan it) and then parse each
224 * instruction. If everything is ok, we set a flag telling the hardware to run
225 * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
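 *
 * That scanning is implemented below by eb_parse(): the user batch is copied
 * into a shadow buffer taken from an engine pool, run through
 * intel_engine_cmd_parser() asynchronously via eb_parse_pipeline(), and on
 * success the shadow is substituted as the batch actually executed.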
228 struct i915_execbuffer {
229 struct drm_i915_private *i915; /** i915 backpointer */
230 struct drm_file *file; /** per-file lookup tables and limits */
231 struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
232 struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
235 struct intel_engine_cs *engine; /** engine to queue the request to */
236 struct intel_context *context; /* logical state for the request */
237 struct i915_gem_context *gem_context; /** caller's context */
239 struct i915_request *request; /** our request to build */
240 struct eb_vma *batch; /** identity of the batch obj/vma */
241 struct i915_vma *trampoline; /** trampoline used for chaining */
243 /** actual size of execobj[] as we may extend it for the cmdparser */
244 unsigned int buffer_count;
246 /** list of vma not yet bound during reservation phase */
247 struct list_head unbound;
249 /** list of vma that have execobj.relocation_count */
250 struct list_head relocs;
253 * Track the most recently used object for relocations, as we
254 * frequently have to perform multiple relocations within the same
258 struct drm_mm_node node; /** temporary GTT binding */
259 unsigned long vaddr; /** Current kmap address */
260 unsigned long page; /** Currently mapped page index */
261 unsigned int gen; /** Cached value of INTEL_GEN */
262 bool use_64bit_reloc : 1;
265 bool needs_unfenced : 1;
267 struct i915_request *rq;
269 unsigned int rq_size;
272 u64 invalid_flags; /** Set of execobj.flags that are invalid */
273 u32 context_flags; /** Set of execobj.flags to insert from the ctx */
275 u32 batch_start_offset; /** Location within object of batch */
276 u32 batch_len; /** Length of batch within object */
277 u32 batch_flags; /** Flags composed for emit_bb_start() */
280 * Indicate either the size of the hashtable used to resolve
281 * relocation handles, or if negative that we are using a direct
282 * index into the execobj[].
285 struct hlist_head *buckets; /** ht for relocation handles */
288 static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
290 return intel_engine_requires_cmd_parser(eb->engine) ||
291 (intel_engine_using_cmd_parser(eb->engine) &&
292 eb->args->batch_len);
295 static int eb_create(struct i915_execbuffer *eb)
297 if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
298 unsigned int size = 1 + ilog2(eb->buffer_count);
301 * Without a 1:1 association between relocation handles and
302 * the execobject[] index, we instead create a hashtable.
303 * We size it dynamically based on available memory, starting
304 * first with a 1:1 associative hash and scaling back until
305 * the allocation succeeds.
307 * Later on we use a positive lut_size to indicate we are
308 * using this hashtable, and a negative value to indicate a
314 /* While we can still reduce the allocation size, don't
315 * raise a warning and allow the allocation to fail.
316 * On the last pass, though, we want to try as hard
317 * as possible to perform the allocation and warn
322 flags |= __GFP_NORETRY | __GFP_NOWARN;
324 eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
335 eb->lut_size = -eb->buffer_count;
342 eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
343 const struct i915_vma *vma,
346 if (vma->node.size < entry->pad_to_size)
349 if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
352 if (flags & EXEC_OBJECT_PINNED &&
353 vma->node.start != entry->offset)
356 if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
357 vma->node.start < BATCH_OFFSET_BIAS)
360 if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
361 (vma->node.start + vma->node.size - 1) >> 32)
364 if (flags & __EXEC_OBJECT_NEEDS_MAP &&
365 !i915_vma_is_map_and_fenceable(vma))
372 eb_pin_vma(struct i915_execbuffer *eb,
373 const struct drm_i915_gem_exec_object2 *entry,
376 struct i915_vma *vma = ev->vma;
380 pin_flags = vma->node.start;
382 pin_flags = entry->offset & PIN_OFFSET_MASK;
384 pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
385 if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
386 pin_flags |= PIN_GLOBAL;
388 if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
391 if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
392 if (unlikely(i915_vma_pin_fence(vma))) {
398 ev->flags |= __EXEC_OBJECT_HAS_FENCE;
401 ev->flags |= __EXEC_OBJECT_HAS_PIN;
402 return !eb_vma_misplaced(entry, vma, ev->flags);
405 static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
407 GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
409 if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
410 __i915_vma_unpin_fence(vma);
412 __i915_vma_unpin(vma);
416 eb_unreserve_vma(struct eb_vma *ev)
418 if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
421 __eb_unreserve_vma(ev->vma, ev->flags);
422 ev->flags &= ~__EXEC_OBJECT_RESERVED;
426 eb_validate_vma(struct i915_execbuffer *eb,
427 struct drm_i915_gem_exec_object2 *entry,
428 struct i915_vma *vma)
430 if (unlikely(entry->flags & eb->invalid_flags))
433 if (unlikely(entry->alignment &&
434 !is_power_of_2_u64(entry->alignment)))
438 * Offset can be used as input (EXEC_OBJECT_PINNED), reject
439 * any non-page-aligned or non-canonical addresses.
441 if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
442 entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
445 /* pad_to_size was once a reserved field, so sanitize it */
446 if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
447 if (unlikely(offset_in_page(entry->pad_to_size)))
450 entry->pad_to_size = 0;
453 * From the drm_mm perspective the address space is continuous,
454 * so from this point we're always using non-canonical
457 entry->offset = gen8_noncanonical_addr(entry->offset);
459 if (!eb->reloc_cache.has_fence) {
460 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
462 if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
463 eb->reloc_cache.needs_unfenced) &&
464 i915_gem_object_is_tiled(vma->obj))
465 entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
468 if (!(entry->flags & EXEC_OBJECT_PINNED))
469 entry->flags |= eb->context_flags;
475 eb_add_vma(struct i915_execbuffer *eb,
476 unsigned int i, unsigned batch_idx,
477 struct i915_vma *vma)
479 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
480 struct eb_vma *ev = &eb->vma[i];
482 GEM_BUG_ON(i915_vma_is_closed(vma));
484 ev->vma = i915_vma_get(vma);
486 ev->flags = entry->flags;
488 if (eb->lut_size > 0) {
489 ev->handle = entry->handle;
490 hlist_add_head(&ev->node,
491 &eb->buckets[hash_32(entry->handle,
495 if (entry->relocation_count)
496 list_add_tail(&ev->reloc_link, &eb->relocs);
499 * SNA is doing fancy tricks with compressing batch buffers, which leads
500 * to negative relocation deltas. Usually that works out ok since the
501 * relocate address is still positive, except when the batch is placed
502 * very low in the GTT. Ensure this doesn't happen.
504 * Note that actual hangs have only been observed on gen7, but for
505 * paranoia do it everywhere.
507 if (i == batch_idx) {
508 if (entry->relocation_count &&
509 !(ev->flags & EXEC_OBJECT_PINNED))
510 ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
511 if (eb->reloc_cache.has_fence)
512 ev->flags |= EXEC_OBJECT_NEEDS_FENCE;
517 if (eb_pin_vma(eb, entry, ev)) {
518 if (entry->offset != vma->node.start) {
519 entry->offset = vma->node.start | UPDATE;
520 eb->args->flags |= __EXEC_HAS_RELOC;
523 eb_unreserve_vma(ev);
524 list_add_tail(&ev->bind_link, &eb->unbound);
528 static inline int use_cpu_reloc(const struct reloc_cache *cache,
529 const struct drm_i915_gem_object *obj)
531 if (!i915_gem_object_has_struct_page(obj))
534 if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
537 if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
540 return (cache->has_llc ||
542 obj->cache_level != I915_CACHE_NONE);
545 static int eb_reserve_vma(const struct i915_execbuffer *eb,
549 struct drm_i915_gem_exec_object2 *entry = ev->exec;
550 unsigned int exec_flags = ev->flags;
551 struct i915_vma *vma = ev->vma;
554 if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
555 pin_flags |= PIN_GLOBAL;
558 * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
559 * limit address to the first 4GBs for unflagged objects.
561 if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
562 pin_flags |= PIN_ZONE_4G;
564 if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
565 pin_flags |= PIN_MAPPABLE;
567 if (exec_flags & EXEC_OBJECT_PINNED)
568 pin_flags |= entry->offset | PIN_OFFSET_FIXED;
569 else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
570 pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
572 if (drm_mm_node_allocated(&vma->node) &&
573 eb_vma_misplaced(entry, vma, ev->flags)) {
574 err = i915_vma_unbind(vma);
579 err = i915_vma_pin(vma,
580 entry->pad_to_size, entry->alignment,
585 if (entry->offset != vma->node.start) {
586 entry->offset = vma->node.start | UPDATE;
587 eb->args->flags |= __EXEC_HAS_RELOC;
590 if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
591 err = i915_vma_pin_fence(vma);
598 exec_flags |= __EXEC_OBJECT_HAS_FENCE;
601 ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
602 GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
607 static int eb_reserve(struct i915_execbuffer *eb)
609 const unsigned int count = eb->buffer_count;
610 unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
611 struct list_head last;
613 unsigned int i, pass;
617 * Attempt to pin all of the buffers into the GTT.
618 * This is done in 3 phases:
620 * 1a. Unbind all objects that do not match the GTT constraints for
621 * the execbuffer (fenceable, mappable, alignment etc).
622 * 1b. Increment pin count for already bound objects.
623 * 2. Bind new objects.
624 * 3. Decrement pin count.
626 * This avoids unnecessary unbinding of later objects in order to make
627 * room for the earlier objects *unless* we need to defragment.
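	 *
	 * Note the pass-dependent pin flags: the early passes use PIN_NONBLOCK
	 * so we never wait for eviction, and only the final pass (after
	 * evicting the whole vm) drops PIN_NONBLOCK and allows blocking binds.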
633 list_for_each_entry(ev, &eb->unbound, bind_link) {
634 err = eb_reserve_vma(eb, ev, pin_flags);
638 if (!(err == -ENOSPC || err == -EAGAIN))
641 /* Resort *all* the objects into priority order */
642 INIT_LIST_HEAD(&eb->unbound);
643 INIT_LIST_HEAD(&last);
644 for (i = 0; i < count; i++) {
649 if (flags & EXEC_OBJECT_PINNED &&
650 flags & __EXEC_OBJECT_HAS_PIN)
653 eb_unreserve_vma(ev);
655 if (flags & EXEC_OBJECT_PINNED)
656 /* Pinned must have their slot */
657 list_add(&ev->bind_link, &eb->unbound);
658 else if (flags & __EXEC_OBJECT_NEEDS_MAP)
659 /* Mappable objects require the lowest 256MiB (aperture) */
660 list_add_tail(&ev->bind_link, &eb->unbound);
661 else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
662 /* Prioritise 4GiB region for restricted bo */
663 list_add(&ev->bind_link, &last);
665 list_add_tail(&ev->bind_link, &last);
667 list_splice_tail(&last, &eb->unbound);
669 if (err == -EAGAIN) {
670 flush_workqueue(eb->i915->mm.userptr_wq);
679 /* Too fragmented, unbind everything and retry */
680 mutex_lock(&eb->context->vm->mutex);
681 err = i915_gem_evict_vm(eb->context->vm);
682 mutex_unlock(&eb->context->vm->mutex);
691 pin_flags = PIN_USER;
695 static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
697 if (eb->args->flags & I915_EXEC_BATCH_FIRST)
700 return eb->buffer_count - 1;
703 static int eb_select_context(struct i915_execbuffer *eb)
705 struct i915_gem_context *ctx;
707 ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
711 eb->gem_context = ctx;
712 if (rcu_access_pointer(ctx->vm))
713 eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
715 eb->context_flags = 0;
716 if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
717 eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;
722 static int eb_lookup_vmas(struct i915_execbuffer *eb)
724 struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
725 struct drm_i915_gem_object *obj;
726 unsigned int i, batch;
729 if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
732 INIT_LIST_HEAD(&eb->relocs);
733 INIT_LIST_HEAD(&eb->unbound);
735 batch = eb_batch_index(eb);
737 for (i = 0; i < eb->buffer_count; i++) {
738 u32 handle = eb->exec[i].handle;
739 struct i915_lut_handle *lut;
740 struct i915_vma *vma;
742 vma = radix_tree_lookup(handles_vma, handle);
746 obj = i915_gem_object_lookup(eb->file, handle);
747 if (unlikely(!obj)) {
752 vma = i915_vma_instance(obj, eb->context->vm, NULL);
758 lut = i915_lut_handle_alloc();
759 if (unlikely(!lut)) {
764 err = radix_tree_insert(handles_vma, handle, vma);
766 i915_lut_handle_free(lut);
770 /* transfer ref to lut */
771 if (!atomic_fetch_inc(&vma->open_count))
772 i915_vma_reopen(vma);
773 lut->handle = handle;
774 lut->ctx = eb->gem_context;
776 i915_gem_object_lock(obj);
777 list_add(&lut->obj_link, &obj->lut_list);
778 i915_gem_object_unlock(obj);
781 err = eb_validate_vma(eb, &eb->exec[i], vma);
785 eb_add_vma(eb, i, batch, vma);
791 i915_gem_object_put(obj);
793 eb->vma[i].vma = NULL;
797 static struct eb_vma *
798 eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
800 if (eb->lut_size < 0) {
801 if (handle >= -eb->lut_size)
803 return &eb->vma[handle];
805 struct hlist_head *head;
808 head = &eb->buckets[hash_32(handle, eb->lut_size)];
809 hlist_for_each_entry(ev, head, node) {
810 if (ev->handle == handle)
817 static void eb_release_vmas(const struct i915_execbuffer *eb)
819 const unsigned int count = eb->buffer_count;
822 for (i = 0; i < count; i++) {
823 struct eb_vma *ev = &eb->vma[i];
824 struct i915_vma *vma = ev->vma;
829 eb->vma[i].vma = NULL;
831 if (ev->flags & __EXEC_OBJECT_HAS_PIN)
832 __eb_unreserve_vma(vma, ev->flags);
838 static void eb_destroy(const struct i915_execbuffer *eb)
840 GEM_BUG_ON(eb->reloc_cache.rq);
842 if (eb->lut_size > 0)
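/*
 * Compute the absolute GPU address a relocation should contain: the
 * user-supplied delta added to the target vma's current address,
 * canonicalised for gen8+ sign-extension rules.
 */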
847 relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
848 const struct i915_vma *target)
850 return gen8_canonical_addr((int)reloc->delta + target->node.start);
853 static void reloc_cache_init(struct reloc_cache *cache,
854 struct drm_i915_private *i915)
858 /* Must be a variable in the struct to allow GCC to unroll. */
859 cache->gen = INTEL_GEN(i915);
860 cache->has_llc = HAS_LLC(i915);
861 cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
862 cache->has_fence = cache->gen < 4;
863 cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
864 cache->node.flags = 0;
869 static inline void *unmask_page(unsigned long p)
871 return (void *)(uintptr_t)(p & PAGE_MASK);
874 static inline unsigned int unmask_flags(unsigned long p)
876 return p & ~PAGE_MASK;
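/*
 * reloc_cache.vaddr packs the currently mapped page address together with
 * low-bit flags: CLFLUSH_BEFORE/CLFLUSH_AFTER and KMAP (kmap_atomic of a
 * struct page vs an atomic GTT iomapping); unmask_page() and unmask_flags()
 * above split the two apart.
 */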
879 #define KMAP 0x4 /* after CLFLUSH_FLAGS */
881 static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
883 struct drm_i915_private *i915 =
884 container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
888 static void reloc_gpu_flush(struct reloc_cache *cache)
890 GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
891 cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
893 __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
894 i915_gem_object_unpin_map(cache->rq->batch->obj);
896 intel_gt_chipset_flush(cache->rq->engine->gt);
898 i915_request_add(cache->rq);
902 static void reloc_cache_reset(struct reloc_cache *cache)
907 reloc_gpu_flush(cache);
912 vaddr = unmask_page(cache->vaddr);
913 if (cache->vaddr & KMAP) {
914 if (cache->vaddr & CLFLUSH_AFTER)
917 kunmap_atomic(vaddr);
918 i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
920 struct i915_ggtt *ggtt = cache_to_ggtt(cache);
922 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
923 io_mapping_unmap_atomic((void __iomem *)vaddr);
925 if (drm_mm_node_allocated(&cache->node)) {
926 ggtt->vm.clear_range(&ggtt->vm,
929 mutex_lock(&ggtt->vm.mutex);
930 drm_mm_remove_node(&cache->node);
931 mutex_unlock(&ggtt->vm.mutex);
933 i915_vma_unpin((struct i915_vma *)cache->node.mm);
941 static void *reloc_kmap(struct drm_i915_gem_object *obj,
942 struct reloc_cache *cache,
948 kunmap_atomic(unmask_page(cache->vaddr));
950 unsigned int flushes;
953 err = i915_gem_object_prepare_write(obj, &flushes);
957 BUILD_BUG_ON(KMAP & CLFLUSH_FLAGS);
958 BUILD_BUG_ON((KMAP | CLFLUSH_FLAGS) & PAGE_MASK);
960 cache->vaddr = flushes | KMAP;
961 cache->node.mm = (void *)obj;
966 vaddr = kmap_atomic(i915_gem_object_get_dirty_page(obj, page));
967 cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
973 static void *reloc_iomap(struct drm_i915_gem_object *obj,
974 struct reloc_cache *cache,
977 struct i915_ggtt *ggtt = cache_to_ggtt(cache);
978 unsigned long offset;
982 intel_gt_flush_ggtt_writes(ggtt->vm.gt);
983 io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
985 struct i915_vma *vma;
988 if (i915_gem_object_is_tiled(obj))
989 return ERR_PTR(-EINVAL);
991 if (use_cpu_reloc(cache, obj))
994 i915_gem_object_lock(obj);
995 err = i915_gem_object_set_to_gtt_domain(obj, true);
996 i915_gem_object_unlock(obj);
1000 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1002 PIN_NONBLOCK /* NOWARN */ |
1005 memset(&cache->node, 0, sizeof(cache->node));
1006 mutex_lock(&ggtt->vm.mutex);
1007 err = drm_mm_insert_node_in_range
1008 (&ggtt->vm.mm, &cache->node,
1009 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
1010 0, ggtt->mappable_end,
1012 mutex_unlock(&ggtt->vm.mutex);
1013 if (err) /* no inactive aperture space, use cpu reloc */
1016 cache->node.start = vma->node.start;
1017 cache->node.mm = (void *)vma;
1021 offset = cache->node.start;
1022 if (drm_mm_node_allocated(&cache->node)) {
1023 ggtt->vm.insert_page(&ggtt->vm,
1024 i915_gem_object_get_dma_address(obj, page),
1025 offset, I915_CACHE_NONE, 0);
1027 offset += page << PAGE_SHIFT;
1030 vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
1033 cache->vaddr = (unsigned long)vaddr;
1038 static void *reloc_vaddr(struct drm_i915_gem_object *obj,
1039 struct reloc_cache *cache,
1044 if (cache->page == page) {
1045 vaddr = unmask_page(cache->vaddr);
1048 if ((cache->vaddr & KMAP) == 0)
1049 vaddr = reloc_iomap(obj, cache, page);
1051 vaddr = reloc_kmap(obj, cache, page);
1057 static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
1059 if (unlikely(flushes & (CLFLUSH_BEFORE | CLFLUSH_AFTER))) {
1060 if (flushes & CLFLUSH_BEFORE) {
1068 * Writes to the same cacheline are serialised by the CPU
1069 * (including clflush). On the write path, we only require
1070 * that it hits memory in an orderly fashion and place
1071 * mb barriers at the start and end of the relocation phase
1072 * to ensure ordering of clflush wrt to the system.
1074 if (flushes & CLFLUSH_AFTER)
1080 static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
1082 struct drm_i915_gem_object *obj = vma->obj;
1087 if (obj->cache_dirty & ~obj->cache_coherent)
1088 i915_gem_clflush_object(obj, 0);
1089 obj->write_domain = 0;
1091 err = i915_request_await_object(rq, vma->obj, true);
1093 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
1095 i915_vma_unlock(vma);
1100 static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
1101 struct i915_vma *vma,
1104 struct reloc_cache *cache = &eb->reloc_cache;
1105 struct intel_engine_pool_node *pool;
1106 struct i915_request *rq;
1107 struct i915_vma *batch;
1111 pool = intel_engine_get_pool(eb->engine, PAGE_SIZE);
1113 return PTR_ERR(pool);
1115 cmd = i915_gem_object_pin_map(pool->obj,
1124 batch = i915_vma_instance(pool->obj, vma->vm, NULL);
1125 if (IS_ERR(batch)) {
1126 err = PTR_ERR(batch);
1130 err = i915_vma_pin(batch, 0, 0, PIN_USER | PIN_NONBLOCK);
1134 rq = i915_request_create(eb->context);
1140 err = intel_engine_pool_mark_active(pool, rq);
1144 err = reloc_move_to_gpu(rq, vma);
1148 err = eb->engine->emit_bb_start(rq,
1149 batch->node.start, PAGE_SIZE,
1150 cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
1154 i915_vma_lock(batch);
1155 err = i915_request_await_object(rq, batch->obj, false);
1157 err = i915_vma_move_to_active(batch, rq, 0);
1158 i915_vma_unlock(batch);
1163 i915_vma_unpin(batch);
1166 cache->rq_cmd = cmd;
1169 /* Return with batch mapping (cmd) still pinned */
1173 i915_request_set_error_once(rq, err);
1175 i915_request_add(rq);
1177 i915_vma_unpin(batch);
1179 i915_gem_object_unpin_map(pool->obj);
1181 intel_engine_pool_put(pool);
1185 static u32 *reloc_gpu(struct i915_execbuffer *eb,
1186 struct i915_vma *vma,
1189 struct reloc_cache *cache = &eb->reloc_cache;
1192 if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
1193 reloc_gpu_flush(cache);
1195 if (unlikely(!cache->rq)) {
1198 if (!intel_engine_can_store_dword(eb->engine))
1199 return ERR_PTR(-ENODEV);
1201 err = __reloc_gpu_alloc(eb, vma, len);
1203 return ERR_PTR(err);
1206 cmd = cache->rq_cmd + cache->rq_size;
1207 cache->rq_size += len;
1213 relocate_entry(struct i915_vma *vma,
1214 const struct drm_i915_gem_relocation_entry *reloc,
1215 struct i915_execbuffer *eb,
1216 const struct i915_vma *target)
1218 u64 offset = reloc->offset;
1219 u64 target_offset = relocation_target(reloc, target);
1220 bool wide = eb->reloc_cache.use_64bit_reloc;
1223 if (!eb->reloc_cache.vaddr &&
1224 (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
1225 !dma_resv_test_signaled_rcu(vma->resv, true))) {
1226 const unsigned int gen = eb->reloc_cache.gen;
1232 len = offset & 7 ? 8 : 5;
1238 batch = reloc_gpu(eb, vma, len);
1242 addr = gen8_canonical_addr(vma->node.start + offset);
1245 *batch++ = MI_STORE_DWORD_IMM_GEN4;
1246 *batch++ = lower_32_bits(addr);
1247 *batch++ = upper_32_bits(addr);
1248 *batch++ = lower_32_bits(target_offset);
1250 addr = gen8_canonical_addr(addr + 4);
1252 *batch++ = MI_STORE_DWORD_IMM_GEN4;
1253 *batch++ = lower_32_bits(addr);
1254 *batch++ = upper_32_bits(addr);
1255 *batch++ = upper_32_bits(target_offset);
1257 *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
1258 *batch++ = lower_32_bits(addr);
1259 *batch++ = upper_32_bits(addr);
1260 *batch++ = lower_32_bits(target_offset);
1261 *batch++ = upper_32_bits(target_offset);
1263 } else if (gen >= 6) {
1264 *batch++ = MI_STORE_DWORD_IMM_GEN4;
1267 *batch++ = target_offset;
1268 } else if (gen >= 4) {
1269 *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1272 *batch++ = target_offset;
1274 *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
1276 *batch++ = target_offset;
1283 vaddr = reloc_vaddr(vma->obj, &eb->reloc_cache, offset >> PAGE_SHIFT);
1285 return PTR_ERR(vaddr);
1287 clflush_write32(vaddr + offset_in_page(offset),
1288 lower_32_bits(target_offset),
1289 eb->reloc_cache.vaddr);
1292 offset += sizeof(u32);
1293 target_offset >>= 32;
1299 return target->node.start | UPDATE;
1303 eb_relocate_entry(struct i915_execbuffer *eb,
1305 const struct drm_i915_gem_relocation_entry *reloc)
1307 struct drm_i915_private *i915 = eb->i915;
1308 struct eb_vma *target;
1311 /* we already hold a reference to all valid objects */
1312 target = eb_get_vma(eb, reloc->target_handle);
1313 if (unlikely(!target))
1316 /* Validate that the target is in a valid r/w GPU domain */
1317 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
1318 drm_dbg(&i915->drm, "reloc with multiple write domains: "
1319 "target %d offset %d "
1320 "read %08x write %08x",
1321 reloc->target_handle,
1322 (int) reloc->offset,
1323 reloc->read_domains,
1324 reloc->write_domain);
1327 if (unlikely((reloc->write_domain | reloc->read_domains)
1328 & ~I915_GEM_GPU_DOMAINS)) {
1329 drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
1330 "target %d offset %d "
1331 "read %08x write %08x",
1332 reloc->target_handle,
1333 (int) reloc->offset,
1334 reloc->read_domains,
1335 reloc->write_domain);
1339 if (reloc->write_domain) {
1340 target->flags |= EXEC_OBJECT_WRITE;
1343 * Sandybridge PPGTT errata: We need a global gtt mapping
1344 * for MI and pipe_control writes because the gpu doesn't
1345 * properly redirect them through the ppgtt for non_secure
1348 if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
1349 IS_GEN(eb->i915, 6)) {
1350 err = i915_vma_bind(target->vma,
1351 target->vma->obj->cache_level,
1354 "Unexpected failure to bind target VMA!"))
1360 * If the relocation already has the right value in it, no
1361 * more work needs to be done.
1363 if (!DBG_FORCE_RELOC &&
1364 gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
1367 /* Check that the relocation address is valid... */
1368 if (unlikely(reloc->offset >
1369 ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
1370 drm_dbg(&i915->drm, "Relocation beyond object bounds: "
1371 "target %d offset %d size %d.\n",
1372 reloc->target_handle,
1374 (int)ev->vma->size);
1377 if (unlikely(reloc->offset & 3)) {
1378 drm_dbg(&i915->drm, "Relocation not 4-byte aligned: "
1379 "target %d offset %d.\n",
1380 reloc->target_handle,
1381 (int)reloc->offset);
1386 * If we write into the object, we need to force the synchronisation
1387 * barrier, either with an asynchronous clflush or if we executed the
1388 * patching using the GPU (though that should be serialised by the
1389 * timeline). To be completely sure, and since doing the
1390 * relocations means we are already stalling, disable the user's
1391 * opt-out of our synchronisation.
1393 ev->flags &= ~EXEC_OBJECT_ASYNC;
1395 /* and update the user's relocation entry */
1396 return relocate_entry(ev->vma, reloc, eb, target->vma);
1399 static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
1401 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
1402 struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
1403 struct drm_i915_gem_relocation_entry __user *urelocs;
1404 const struct drm_i915_gem_exec_object2 *entry = ev->exec;
1405 unsigned int remain;
1407 urelocs = u64_to_user_ptr(entry->relocs_ptr);
1408 remain = entry->relocation_count;
1409 if (unlikely(remain > N_RELOC(ULONG_MAX)))
1413 * We must check that the entire relocation array is safe
1414 * to read. However, if the array is not writable the user loses
1415 * the updated relocation values.
1417 if (unlikely(!access_ok(urelocs, remain*sizeof(*urelocs))))
1421 struct drm_i915_gem_relocation_entry *r = stack;
1422 unsigned int count =
1423 min_t(unsigned int, remain, ARRAY_SIZE(stack));
1424 unsigned int copied;
1427 * This is the fast path and we cannot handle a pagefault
1428 * whilst holding the struct_mutex lest the user pass in the
1429 * relocations contained within a mmaped bo. In such a case
1430 * the page fault handler would call i915_gem_fault() and
1431 * we would try to acquire the struct_mutex again. Obviously
1432 * this is bad and so lockdep complains vehemently.
1434 pagefault_disable();
1435 copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
1437 if (unlikely(copied)) {
1444 u64 offset = eb_relocate_entry(eb, ev, r);
1446 if (likely(offset == 0)) {
1447 } else if ((s64)offset < 0) {
1448 remain = (int)offset;
1452 * Note that reporting an error now
1453 * leaves everything in an inconsistent
1454 * state as we have *already* changed
1455 * the relocation value inside the
1456 * object. As we have not updated
1457 * reloc.presumed_offset and will not
1458 * change the execobject.offset, on the
1459 * next call we may not rewrite the value
1460 * inside the object, leaving it
1461 * dangling and causing a GPU hang - unless
1462 * userspace dynamically rebuilds the
1463 * relocations on each execbuf rather than
1464 * presuming a static tree.
1466 * We did previously check if the relocations
1467 * were writable (access_ok), an error now
1468 * would be a strange race with mprotect,
1469 * having already demonstrated that we
1470 * can read from this userspace address.
1472 offset = gen8_canonical_addr(offset & ~UPDATE);
1473 if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) {
1478 } while (r++, --count);
1479 urelocs += ARRAY_SIZE(stack);
1482 reloc_cache_reset(&eb->reloc_cache);
1487 eb_relocate_vma_slow(struct i915_execbuffer *eb, struct eb_vma *ev)
1489 const struct drm_i915_gem_exec_object2 *entry = ev->exec;
1490 struct drm_i915_gem_relocation_entry *relocs =
1491 u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
1495 for (i = 0; i < entry->relocation_count; i++) {
1496 u64 offset = eb_relocate_entry(eb, ev, &relocs[i]);
1498 if ((s64)offset < 0) {
1505 reloc_cache_reset(&eb->reloc_cache);
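/*
 * Fault in the first byte of each page of the user's relocation array,
 * plus the very last byte, so that the pagefault-disabled copies in the
 * fast path are unlikely to fault.
 */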
1509 static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
1511 const char __user *addr, *end;
1513 char __maybe_unused c;
1515 size = entry->relocation_count;
1519 if (size > N_RELOC(ULONG_MAX))
1522 addr = u64_to_user_ptr(entry->relocs_ptr);
1523 size *= sizeof(struct drm_i915_gem_relocation_entry);
1524 if (!access_ok(addr, size))
1528 for (; addr < end; addr += PAGE_SIZE) {
1529 int err = __get_user(c, addr);
1533 return __get_user(c, end - 1);
1536 static int eb_copy_relocations(const struct i915_execbuffer *eb)
1538 struct drm_i915_gem_relocation_entry *relocs;
1539 const unsigned int count = eb->buffer_count;
1543 for (i = 0; i < count; i++) {
1544 const unsigned int nreloc = eb->exec[i].relocation_count;
1545 struct drm_i915_gem_relocation_entry __user *urelocs;
1547 unsigned long copied;
1552 err = check_relocations(&eb->exec[i]);
1556 urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
1557 size = nreloc * sizeof(*relocs);
1559 relocs = kvmalloc_array(size, 1, GFP_KERNEL);
1565 /* copy_from_user is limited to < 4GiB */
1569 min_t(u64, BIT_ULL(31), size - copied);
1571 if (__copy_from_user((char *)relocs + copied,
1572 (char __user *)urelocs + copied,
1577 } while (copied < size);
1580 * As we do not update the known relocation offsets after
1581 * relocating (due to the complexities in lock handling),
1582 * we need to mark them as invalid now so that we force the
1583 * relocation processing next time. Just in case the target
1584 * object is evicted and then rebound into its old
1585 * presumed_offset before the next execbuffer - if that
1586 * happened we would make the mistake of assuming that the
1587 * relocations were valid.
1589 if (!user_access_begin(urelocs, size))
1592 for (copied = 0; copied < nreloc; copied++)
1594 &urelocs[copied].presumed_offset,
1598 eb->exec[i].relocs_ptr = (uintptr_t)relocs;
1610 relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
1611 if (eb->exec[i].relocation_count)
1617 static int eb_prefault_relocations(const struct i915_execbuffer *eb)
1619 const unsigned int count = eb->buffer_count;
1622 for (i = 0; i < count; i++) {
1625 err = check_relocations(&eb->exec[i]);
1633 static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
1635 struct drm_device *dev = &eb->i915->drm;
1636 bool have_copy = false;
1641 if (signal_pending(current)) {
1646 mutex_unlock(&dev->struct_mutex);
1649 * We take 3 passes through the slowpath.
1651 * 1 - we try to just prefault all the user relocation entries and
1652 * then attempt to reuse the atomic pagefault disabled fast path again.
1654 * 2 - we copy the user entries to a local buffer here outside of the
1655 * mutex and allow ourselves to wait upon any rendering before
1658 * 3 - we already have a local copy of the relocation entries, but
1659 * were interrupted (EAGAIN) whilst waiting for the objects, try again.
1662 err = eb_prefault_relocations(eb);
1663 } else if (!have_copy) {
1664 err = eb_copy_relocations(eb);
1665 have_copy = err == 0;
1671 mutex_lock(&dev->struct_mutex);
1675 /* A frequent cause of EAGAIN is currently-unavailable client pages */
1676 flush_workqueue(eb->i915->mm.userptr_wq);
1678 err = i915_mutex_lock_interruptible(dev);
1680 mutex_lock(&dev->struct_mutex);
1684 GEM_BUG_ON(!eb->batch);
1686 list_for_each_entry(ev, &eb->relocs, reloc_link) {
1688 pagefault_disable();
1689 err = eb_relocate_vma(eb, ev);
1694 err = eb_relocate_vma_slow(eb, ev);
1701 * Leave the user relocations as they are; this is the painfully slow path,
1702 * and we want to avoid the complication of dropping the lock whilst
1703 * having buffers reserved in the aperture and so causing spurious
1704 * ENOSPC for random operations.
1713 const unsigned int count = eb->buffer_count;
1716 for (i = 0; i < count; i++) {
1717 const struct drm_i915_gem_exec_object2 *entry =
1719 struct drm_i915_gem_relocation_entry *relocs;
1721 if (!entry->relocation_count)
1724 relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
1732 static int eb_relocate(struct i915_execbuffer *eb)
1736 mutex_lock(&eb->gem_context->mutex);
1737 err = eb_lookup_vmas(eb);
1738 mutex_unlock(&eb->gem_context->mutex);
1742 err = eb_reserve(eb);
1746 /* The objects are in their final locations, apply the relocations. */
1747 if (eb->args->flags & __EXEC_HAS_RELOC) {
1750 list_for_each_entry(ev, &eb->relocs, reloc_link) {
1751 if (eb_relocate_vma(eb, ev))
1752 return eb_relocate_slow(eb);
1759 static int eb_move_to_gpu(struct i915_execbuffer *eb)
1761 const unsigned int count = eb->buffer_count;
1762 struct ww_acquire_ctx acquire;
1766 ww_acquire_init(&acquire, &reservation_ww_class);
1768 for (i = 0; i < count; i++) {
1769 struct eb_vma *ev = &eb->vma[i];
1770 struct i915_vma *vma = ev->vma;
1772 err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
1773 if (err == -EDEADLK) {
1778 ww_mutex_unlock(&eb->vma[j].vma->resv->lock);
1780 swap(eb->vma[i], eb->vma[j]);
1783 err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
1789 ww_acquire_done(&acquire);
1792 struct eb_vma *ev = &eb->vma[i];
1793 struct i915_vma *vma = ev->vma;
1794 unsigned int flags = ev->flags;
1795 struct drm_i915_gem_object *obj = vma->obj;
1797 assert_vma_held(vma);
1799 if (flags & EXEC_OBJECT_CAPTURE) {
1800 struct i915_capture_list *capture;
1802 capture = kmalloc(sizeof(*capture), GFP_KERNEL);
1804 capture->next = eb->request->capture_list;
1806 eb->request->capture_list = capture;
1811 * If the GPU is not _reading_ through the CPU cache, we need
1812 * to make sure that any writes (both previous GPU writes from
1813 * before a change in snooping levels and normal CPU writes)
1814 * caught in that cache are flushed to main memory.
1817 * obj->cache_dirty &&
1818 * !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
1819 * but gcc's optimiser doesn't handle that as well and emits
1820 * two jumps instead of one. Maybe one day...
1822 if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
1823 if (i915_gem_clflush_object(obj, 0))
1824 flags &= ~EXEC_OBJECT_ASYNC;
1827 if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
1828 err = i915_request_await_object
1829 (eb->request, obj, flags & EXEC_OBJECT_WRITE);
1833 err = i915_vma_move_to_active(vma, eb->request, flags);
1835 i915_vma_unlock(vma);
1837 __eb_unreserve_vma(vma, flags);
1842 ww_acquire_fini(&acquire);
1849 /* Unconditionally flush any chipset caches (for streaming writes). */
1850 intel_gt_chipset_flush(eb->engine->gt);
1854 i915_request_set_error_once(eb->request, err);
1858 static int i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
1860 if (exec->flags & __I915_EXEC_ILLEGAL_FLAGS)
1863 /* Kernel clipping was a DRI1 misfeature */
1864 if (!(exec->flags & I915_EXEC_FENCE_ARRAY)) {
1865 if (exec->num_cliprects || exec->cliprects_ptr)
1869 if (exec->DR4 == 0xffffffff) {
1870 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
1873 if (exec->DR1 || exec->DR4)
1876 if ((exec->batch_start_offset | exec->batch_len) & 0x7)
1882 static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
1887 if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
1888 drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
1892 cs = intel_ring_begin(rq, 4 * 2 + 2);
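	/* 4 register/value pairs, plus the LRI header and a trailing MI_NOOP */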
1896 *cs++ = MI_LOAD_REGISTER_IMM(4);
1897 for (i = 0; i < 4; i++) {
1898 *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
1902 intel_ring_advance(rq, cs);
1907 static struct i915_vma *
1908 shadow_batch_pin(struct drm_i915_gem_object *obj,
1909 struct i915_address_space *vm,
1912 struct i915_vma *vma;
1915 vma = i915_vma_instance(obj, vm, NULL);
1919 err = i915_vma_pin(vma, 0, 0, flags);
1921 return ERR_PTR(err);
1926 struct eb_parse_work {
1927 struct dma_fence_work base;
1928 struct intel_engine_cs *engine;
1929 struct i915_vma *batch;
1930 struct i915_vma *shadow;
1931 struct i915_vma *trampoline;
1932 unsigned int batch_offset;
1933 unsigned int batch_length;
1936 static int __eb_parse(struct dma_fence_work *work)
1938 struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
1940 return intel_engine_cmd_parser(pw->engine,
1948 static void __eb_parse_release(struct dma_fence_work *work)
1950 struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
1953 i915_active_release(&pw->trampoline->active);
1954 i915_active_release(&pw->shadow->active);
1955 i915_active_release(&pw->batch->active);
1958 static const struct dma_fence_work_ops eb_parse_ops = {
1961 .release = __eb_parse_release,
1964 static int eb_parse_pipeline(struct i915_execbuffer *eb,
1965 struct i915_vma *shadow,
1966 struct i915_vma *trampoline)
1968 struct eb_parse_work *pw;
1971 pw = kzalloc(sizeof(*pw), GFP_KERNEL);
1975 err = i915_active_acquire(&eb->batch->vma->active);
1979 err = i915_active_acquire(&shadow->active);
1984 err = i915_active_acquire(&trampoline->active);
1989 dma_fence_work_init(&pw->base, &eb_parse_ops);
1991 pw->engine = eb->engine;
1992 pw->batch = eb->batch->vma;
1993 pw->batch_offset = eb->batch_start_offset;
1994 pw->batch_length = eb->batch_len;
1995 pw->shadow = shadow;
1996 pw->trampoline = trampoline;
1998 err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
2000 goto err_trampoline;
2002 err = dma_resv_reserve_shared(pw->batch->resv, 1);
2004 goto err_batch_unlock;
2006 /* Wait for all writes (and relocs) into the batch to complete */
2007 err = i915_sw_fence_await_reservation(&pw->base.chain,
2008 pw->batch->resv, NULL, false,
2011 goto err_batch_unlock;
2013 /* Keep the batch alive and unwritten as we parse */
2014 dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
2016 dma_resv_unlock(pw->batch->resv);
2018 /* Force execution to wait for completion of the parser */
2019 dma_resv_lock(shadow->resv, NULL);
2020 dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
2021 dma_resv_unlock(shadow->resv);
2023 dma_fence_work_commit(&pw->base);
2027 dma_resv_unlock(pw->batch->resv);
2030 i915_active_release(&trampoline->active);
2032 i915_active_release(&shadow->active);
2034 i915_active_release(&eb->batch->vma->active);
2040 static int eb_parse(struct i915_execbuffer *eb)
2042 struct drm_i915_private *i915 = eb->i915;
2043 struct intel_engine_pool_node *pool;
2044 struct i915_vma *shadow, *trampoline;
2048 if (!eb_use_cmdparser(eb))
2051 len = eb->batch_len;
2052 if (!CMDPARSER_USES_GGTT(eb->i915)) {
2054 * ppGTT backed shadow buffers must be mapped RO, to prevent
2055 * post-scan tampering
2057 if (!eb->context->vm->has_read_only) {
2059 "Cannot prevent post-scan tampering without RO capable vm\n");
2063 len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
2066 pool = intel_engine_get_pool(eb->engine, len);
2068 return PTR_ERR(pool);
2070 shadow = shadow_batch_pin(pool->obj, eb->context->vm, PIN_USER);
2071 if (IS_ERR(shadow)) {
2072 err = PTR_ERR(shadow);
2075 i915_gem_object_set_readonly(shadow->obj);
2078 if (CMDPARSER_USES_GGTT(eb->i915)) {
2079 trampoline = shadow;
2081 shadow = shadow_batch_pin(pool->obj,
2082 &eb->engine->gt->ggtt->vm,
2084 if (IS_ERR(shadow)) {
2085 err = PTR_ERR(shadow);
2086 shadow = trampoline;
2090 eb->batch_flags |= I915_DISPATCH_SECURE;
2093 err = eb_parse_pipeline(eb, shadow, trampoline);
2095 goto err_trampoline;
2097 eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
2098 eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
2099 eb->batch = &eb->vma[eb->buffer_count++];
2101 eb->trampoline = trampoline;
2102 eb->batch_start_offset = 0;
2104 shadow->private = pool;
2109 i915_vma_unpin(trampoline);
2111 i915_vma_unpin(shadow);
2113 intel_engine_pool_put(pool);
2118 add_to_client(struct i915_request *rq, struct drm_file *file)
2120 struct drm_i915_file_private *file_priv = file->driver_priv;
2122 rq->file_priv = file_priv;
2124 spin_lock(&file_priv->mm.lock);
2125 list_add_tail(&rq->client_link, &file_priv->mm.request_list);
2126 spin_unlock(&file_priv->mm.lock);
2129 static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
2133 err = eb_move_to_gpu(eb);
2137 if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
2138 err = i915_reset_gen7_sol_offsets(eb->request);
2144 * After we have completed waiting for other engines (using HW semaphores),
2145 * we can signal that this request/batch is ready to run. This
2146 * allows us to determine if the batch is still waiting on the GPU
2147 * or actually running by checking the breadcrumb.
2149 if (eb->engine->emit_init_breadcrumb) {
2150 err = eb->engine->emit_init_breadcrumb(eb->request);
2155 err = eb->engine->emit_bb_start(eb->request,
2157 eb->batch_start_offset,
2163 if (eb->trampoline) {
2164 GEM_BUG_ON(eb->batch_start_offset);
2165 err = eb->engine->emit_bb_start(eb->request,
2166 eb->trampoline->node.start +
2173 if (intel_context_nopreempt(eb->context))
2174 __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
2179 static int num_vcs_engines(const struct drm_i915_private *i915)
2181 return hweight64(INTEL_INFO(i915)->engine_mask &
2182 GENMASK_ULL(VCS0 + I915_MAX_VCS - 1, VCS0));
2186 * Find one BSD ring to dispatch the corresponding BSD command.
2187 * The engine index is returned.
2190 gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
2191 struct drm_file *file)
2193 struct drm_i915_file_private *file_priv = file->driver_priv;
2195 /* Check whether the file_priv has already selected one ring. */
2196 if ((int)file_priv->bsd_engine < 0)
2197 file_priv->bsd_engine =
2198 get_random_int() % num_vcs_engines(dev_priv);
2200 return file_priv->bsd_engine;
2203 static const enum intel_engine_id user_ring_map[] = {
2204 [I915_EXEC_DEFAULT] = RCS0,
2205 [I915_EXEC_RENDER] = RCS0,
2206 [I915_EXEC_BLT] = BCS0,
2207 [I915_EXEC_BSD] = VCS0,
2208 [I915_EXEC_VEBOX] = VECS0
2211 static struct i915_request *eb_throttle(struct intel_context *ce)
2213 struct intel_ring *ring = ce->ring;
2214 struct intel_timeline *tl = ce->timeline;
2215 struct i915_request *rq;
2218 * Completely unscientific finger-in-the-air estimates for suitable
2219 * maximum user request size (to avoid blocking) and then backoff.
2221 if (intel_ring_update_space(ring) >= PAGE_SIZE)
2225 * Find a request such that, after waiting upon it, there will be at least half
2226 * the ring available. The hysteresis allows us to compete for the
2227 * shared ring and should mean that we sleep less often prior to
2228 * claiming our resources, but not so long that the ring completely
2229 * drains before we can submit our next request.
2231 list_for_each_entry(rq, &tl->requests, link) {
2232 if (rq->ring != ring)
2235 if (__intel_ring_space(rq->postfix,
2236 ring->emit, ring->size) > ring->size / 2)
2239 if (&rq->link == &tl->requests)
2240 return NULL; /* weird, we will check again later for real */
2242 return i915_request_get(rq);
2245 static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
2247 struct intel_timeline *tl;
2248 struct i915_request *rq;
2252 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
2253 * EIO if the GPU is already wedged.
2255 err = intel_gt_terminally_wedged(ce->engine->gt);
2259 if (unlikely(intel_context_is_banned(ce)))
2263 * Pinning the contexts may generate requests in order to acquire
2264 * GGTT space, so do this first before we reserve a seqno for
2267 err = intel_context_pin(ce);
2272 * Take a local wakeref for preparing to dispatch the execbuf as
2273 * we expect to access the hardware fairly frequently in the
2274 * process, and require the engine to be kept awake between accesses.
2275 * Upon dispatch, we acquire another prolonged wakeref that we hold
2276 * until the timeline is idle, which in turn releases the wakeref
2277 * taken on the engine, and the parent device.
2279 tl = intel_context_timeline_lock(ce);
2285 intel_context_enter(ce);
2286 rq = eb_throttle(ce);
2288 intel_context_timeline_unlock(tl);
2291 bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
2294 timeout = MAX_SCHEDULE_TIMEOUT;
2298 timeout = i915_request_wait(rq,
2299 I915_WAIT_INTERRUPTIBLE,
2301 i915_request_put(rq);
2304 err = nonblock ? -EWOULDBLOCK : timeout;
2309 eb->engine = ce->engine;
2314 mutex_lock(&tl->mutex);
2315 intel_context_exit(ce);
2316 intel_context_timeline_unlock(tl);
2318 intel_context_unpin(ce);
2322 static void eb_unpin_engine(struct i915_execbuffer *eb)
2324 struct intel_context *ce = eb->context;
2325 struct intel_timeline *tl = ce->timeline;
2327 mutex_lock(&tl->mutex);
2328 intel_context_exit(ce);
2329 mutex_unlock(&tl->mutex);
2331 intel_context_unpin(ce);
2335 eb_select_legacy_ring(struct i915_execbuffer *eb,
2336 struct drm_file *file,
2337 struct drm_i915_gem_execbuffer2 *args)
2339 struct drm_i915_private *i915 = eb->i915;
2340 unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
2342 if (user_ring_id != I915_EXEC_BSD &&
2343 (args->flags & I915_EXEC_BSD_MASK)) {
2345 "execbuf with non bsd ring but with invalid "
2346 "bsd dispatch flags: %d\n", (int)(args->flags));
2350 if (user_ring_id == I915_EXEC_BSD && num_vcs_engines(i915) > 1) {
2351 unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
2353 if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
2354 bsd_idx = gen8_dispatch_bsd_engine(i915, file);
2355 } else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
2356 bsd_idx <= I915_EXEC_BSD_RING2) {
2357 bsd_idx >>= I915_EXEC_BSD_SHIFT;
2361 "execbuf with unknown bsd ring: %u\n",
2366 return _VCS(bsd_idx);
2369 if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
2370 drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n",
2375 return user_ring_map[user_ring_id];
2379 eb_pin_engine(struct i915_execbuffer *eb,
2380 struct drm_file *file,
2381 struct drm_i915_gem_execbuffer2 *args)
2383 struct intel_context *ce;
2387 if (i915_gem_context_user_engines(eb->gem_context))
2388 idx = args->flags & I915_EXEC_RING_MASK;
2390 idx = eb_select_legacy_ring(eb, file, args);
2392 ce = i915_gem_context_get_engine(eb->gem_context, idx);
2396 err = __eb_pin_engine(eb, ce);
2397 intel_context_put(ce);
2403 __free_fence_array(struct drm_syncobj **fences, unsigned int n)
2406 drm_syncobj_put(ptr_mask_bits(fences[n], 2));
static struct drm_syncobj **
get_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_file *file)
{
	const unsigned long nfences = args->num_cliprects;
	struct drm_i915_gem_exec_fence __user *user;
	struct drm_syncobj **fences;
	unsigned long n;
	int err;

	if (!(args->flags & I915_EXEC_FENCE_ARRAY))
		return NULL;

	/* Check multiplication overflow for access_ok() and kvmalloc_array() */
	BUILD_BUG_ON(sizeof(size_t) > sizeof(unsigned long));
	if (nfences > min_t(unsigned long,
			    ULONG_MAX / sizeof(*user),
			    SIZE_MAX / sizeof(*fences)))
		return ERR_PTR(-EINVAL);

	user = u64_to_user_ptr(args->cliprects_ptr);
	if (!access_ok(user, nfences * sizeof(*user)))
		return ERR_PTR(-EFAULT);

	fences = kvmalloc_array(nfences, sizeof(*fences),
				__GFP_NOWARN | GFP_KERNEL);
	if (!fences)
		return ERR_PTR(-ENOMEM);

	for (n = 0; n < nfences; n++) {
		struct drm_i915_gem_exec_fence fence;
		struct drm_syncobj *syncobj;

		if (__copy_from_user(&fence, user++, sizeof(fence))) {
			err = -EFAULT;
			goto err;
		}

		if (fence.flags & __I915_EXEC_FENCE_UNKNOWN_FLAGS) {
			err = -EINVAL;
			goto err;
		}

		syncobj = drm_syncobj_find(file, fence.handle);
		if (!syncobj) {
			DRM_DEBUG("Invalid syncobj handle provided\n");
			err = -ENOENT;
			goto err;
		}

		BUILD_BUG_ON(~(ARCH_KMALLOC_MINALIGN - 1) &
			     ~__I915_EXEC_FENCE_UNKNOWN_FLAGS);

		fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
	}

	return fences;

err:
	__free_fence_array(fences, n);
	return ERR_PTR(err);
}
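/*
 * For illustration: ptr_pack_bits() above stashes the two
 * I915_EXEC_FENCE_* flag bits in the low bits of the syncobj pointer,
 * which are guaranteed to be zero since kmalloc allocations are at least
 * ARCH_KMALLOC_MINALIGN aligned (the BUILD_BUG_ON enforces exactly this).
 * A rough, hand-rolled equivalent of the pack/unpack pair:
 *
 *	fences[n] = (void *)((unsigned long)syncobj | (fence.flags & 3));
 *	flags   = (unsigned long)fences[n] & 3;
 *	syncobj = (void *)((unsigned long)fences[n] & ~3ul);
 */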
static void
put_fence_array(struct drm_i915_gem_execbuffer2 *args,
		struct drm_syncobj **fences)
{
	if (fences)
		__free_fence_array(fences, args->num_cliprects);
}
static int
await_fence_array(struct i915_execbuffer *eb,
		  struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	unsigned int n;
	int err;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		struct dma_fence *fence;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_WAIT))
			continue;

		fence = drm_syncobj_fence_get(syncobj);
		if (!fence)
			return -EINVAL;

		err = i915_request_await_dma_fence(eb->request, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}
static void
signal_fence_array(struct i915_execbuffer *eb,
		   struct drm_syncobj **fences)
{
	const unsigned int nfences = eb->args->num_cliprects;
	struct dma_fence * const fence = &eb->request->fence;
	unsigned int n;

	for (n = 0; n < nfences; n++) {
		struct drm_syncobj *syncobj;
		unsigned int flags;

		syncobj = ptr_unpack_bits(fences[n], &flags, 2);
		if (!(flags & I915_EXEC_FENCE_SIGNAL))
			continue;

		drm_syncobj_replace_fence(syncobj, fence);
	}
}
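/*
 * For illustration: a userspace sketch of the fence-array ABI consumed
 * by await_fence_array()/signal_fence_array(). The num_cliprects and
 * cliprects_ptr fields are reused to carry the array when
 * I915_EXEC_FENCE_ARRAY is set (the handle values are hypothetical):
 *
 *	struct drm_i915_gem_exec_fence fences[2] = {
 *		{ .handle = wait_syncobj,   .flags = I915_EXEC_FENCE_WAIT },
 *		{ .handle = signal_syncobj, .flags = I915_EXEC_FENCE_SIGNAL },
 *	};
 *	execbuf.flags |= I915_EXEC_FENCE_ARRAY;
 *	execbuf.num_cliprects = 2;
 *	execbuf.cliprects_ptr = (uintptr_t)fences;
 */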
static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (rq == end || !i915_request_retire(rq))
			break;
}
static void eb_request_add(struct i915_execbuffer *eb)
{
	struct i915_request *rq = eb->request;
	struct intel_timeline * const tl = i915_request_timeline(rq);
	struct i915_sched_attr attr = {};
	struct i915_request *prev;

	lockdep_assert_held(&tl->mutex);
	lockdep_unpin_lock(&tl->mutex, rq->cookie);

	trace_i915_request_add(rq);

	prev = __i915_request_commit(rq);

	/* Check that the context wasn't destroyed before submission */
	if (likely(rcu_access_pointer(eb->context->gem_context))) {
		attr = eb->gem_context->sched;

		/*
		 * Boost actual workloads past semaphores!
		 *
		 * With semaphores we spin on one engine waiting for another,
		 * simply to reduce the latency of starting our work when
		 * the signaler completes. However, if there is any other
		 * work that we could be doing on this engine instead, that
		 * is better utilisation and will reduce the overall duration
		 * of the current work. To avoid PI boosting a request that
		 * used a semaphore far in the past over useful work, we keep
		 * a history of any semaphore use along our dependency chain.
		 */
		if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
			attr.priority |= I915_PRIORITY_NOSEMAPHORE;

		/*
		 * Boost priorities to new clients (new request flows).
		 *
		 * Allow interactive/synchronous clients to jump ahead of
		 * the bulk clients. (FQ_CODEL)
		 */
		if (list_empty(&rq->sched.signalers_list))
			attr.priority |= I915_PRIORITY_WAIT;
	} else {
		/* Serialise with context_close via the add_to_timeline */
		i915_request_set_error_once(rq, -ENOENT);
		__i915_request_skip(rq);
	}

	local_bh_disable();
	__i915_request_queue(rq, &attr);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */

	/* Try to clean up the client's timeline after submitting the request */
	if (prev)
		retire_requests(tl, prev);

	mutex_unlock(&tl->mutex);
}
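/*
 * For illustration of the two boosts above: a brand-new request from an
 * interactive client (no signalers, no semaphore history) is queued at
 * context priority | I915_PRIORITY_WAIT | I915_PRIORITY_NOSEMAPHORE,
 * letting it jump ahead of bulk submissions and of requests that are
 * still spinning on semaphores.
 */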
static int
i915_gem_do_execbuffer(struct drm_device *dev,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec,
		       struct drm_syncobj **fences)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct i915_execbuffer eb;
	struct dma_fence *in_fence = NULL;
	struct dma_fence *exec_fence = NULL;
	struct sync_file *out_fence = NULL;
	struct i915_vma *batch;
	int out_fence_fd = -1;
	int err;

	BUILD_BUG_ON(__EXEC_INTERNAL_FLAGS & ~__I915_EXEC_ILLEGAL_FLAGS);
	BUILD_BUG_ON(__EXEC_OBJECT_INTERNAL_FLAGS &
		     ~__EXEC_OBJECT_UNKNOWN_FLAGS);

	eb.i915 = i915;
	eb.file = file;
	eb.args = args;
	if (DBG_FORCE_RELOC || !(args->flags & I915_EXEC_NO_RELOC))
		args->flags |= __EXEC_HAS_RELOC;

	eb.exec = exec;
	eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
	eb.vma[0].vma = NULL;

	eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
	reloc_cache_init(&eb.reloc_cache, eb.i915);

	eb.buffer_count = args->buffer_count;
	eb.batch_start_offset = args->batch_start_offset;
	eb.batch_len = args->batch_len;
	eb.trampoline = NULL;

	eb.batch_flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (INTEL_GEN(i915) >= 11)
			return -ENODEV;

		/* Return -EPERM to trigger fallback code on old binaries. */
		if (!HAS_SECURE_BATCHES(i915))
			return -EPERM;

		if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		eb.batch_flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		eb.batch_flags |= I915_DISPATCH_PINNED;

	if (args->flags & I915_EXEC_FENCE_IN) {
		in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!in_fence)
			return -EINVAL;
	}

	if (args->flags & I915_EXEC_FENCE_SUBMIT) {
		if (in_fence) {
			err = -EINVAL;
			goto err_in_fence;
		}

		exec_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
		if (!exec_fence) {
			err = -EINVAL;
			goto err_in_fence;
		}
	}

	if (args->flags & I915_EXEC_FENCE_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			err = out_fence_fd;
			goto err_exec_fence;
		}
	}

	err = eb_create(&eb);
	if (err)
		goto err_out_fence;

	GEM_BUG_ON(!eb.lut_size);

	err = eb_select_context(&eb);
	if (unlikely(err))
		goto err_destroy;

	err = eb_pin_engine(&eb, file, args);
	if (unlikely(err))
		goto err_context;
	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto err_engine;

	err = eb_relocate(&eb);
	if (err) {
		/*
		 * If the user expects the execobject.offset and
		 * reloc.presumed_offset to be an exact match,
		 * as for using NO_RELOC, then we cannot update
		 * the execobject.offset until we have completed
		 * relocation.
		 */
		args->flags &= ~__EXEC_HAS_RELOC;
		goto err_vma;
	}

	if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) {
		drm_dbg(&i915->drm,
			"Attempting to use self-modifying batch buffer\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (range_overflows_t(u64,
			      eb.batch_start_offset, eb.batch_len,
			      eb.batch->vma->size)) {
		drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
		err = -EINVAL;
		goto err_vma;
	}

	if (eb.batch_len == 0)
		eb.batch_len = eb.batch->vma->size - eb.batch_start_offset;

	err = eb_parse(&eb);
	if (err)
		goto err_vma;

	/*
	 * snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again.
	 */
	batch = eb.batch->vma;
	if (eb.batch_flags & I915_DISPATCH_SECURE) {
		struct i915_vma *vma;

		/*
		 * So on first glance it looks freaky that we pin the batch here
		 * outside of the reservation loop. But:
		 * - The batch is already pinned into the relevant ppgtt, so we
		 *   already have the backing storage fully allocated.
		 * - No other BO uses the global gtt (well contexts, but meh),
		 *   so we don't really have issues with multiple objects not
		 *   fitting due to fragmentation.
		 * So this is actually safe.
		 */
		vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto err_parse;
		}

		batch = vma;
	}

	/* All GPU relocation batches must be submitted prior to the user rq */
	GEM_BUG_ON(eb.reloc_cache.rq);

	/* Allocate a request for this batch buffer nice and early. */
	eb.request = i915_request_create(eb.context);
	if (IS_ERR(eb.request)) {
		err = PTR_ERR(eb.request);
		goto err_batch_unpin;
	}

	if (in_fence) {
		err = i915_request_await_dma_fence(eb.request, in_fence);
		if (err < 0)
			goto err_request;
	}

	if (exec_fence) {
		err = i915_request_await_execution(eb.request, exec_fence,
						   eb.engine->bond_execute);
		if (err < 0)
			goto err_request;
	}
	if (fences) {
		err = await_fence_array(&eb, fences);
		if (err)
			goto err_request;
	}

	if (out_fence_fd != -1) {
		out_fence = sync_file_create(&eb.request->fence);
		if (!out_fence) {
			err = -ENOMEM;
			goto err_request;
		}
	}

	/*
	 * Whilst this request exists, batch_obj will be on the
	 * active_list, and so will hold the active reference. Only when this
	 * request is retired will the batch_obj be moved onto the
	 * inactive_list and lose its active reference. Hence we do not need
	 * to explicitly hold another reference here.
	 */
	eb.request->batch = batch;
	if (batch->private)
		intel_engine_pool_mark_active(batch->private, eb.request);

	trace_i915_request_queue(eb.request, eb.batch_flags);
	err = eb_submit(&eb, batch);
err_request:
	add_to_client(eb.request, file);
	i915_request_get(eb.request);
	eb_request_add(&eb);

	if (fences)
		signal_fence_array(&eb, fences);

	if (out_fence) {
		if (err == 0) {
			fd_install(out_fence_fd, out_fence->file);
			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
			args->rsvd2 |= (u64)out_fence_fd << 32;
			out_fence_fd = -1;
		} else {
			fput(out_fence->file);
		}
	}
	i915_request_put(eb.request);

err_batch_unpin:
	if (eb.batch_flags & I915_DISPATCH_SECURE)
		i915_vma_unpin(batch);
err_parse:
	if (batch->private)
		intel_engine_pool_put(batch->private);
err_vma:
	if (eb.exec)
		eb_release_vmas(&eb);
	if (eb.trampoline)
		i915_vma_unpin(eb.trampoline);
	mutex_unlock(&dev->struct_mutex);
err_engine:
	eb_unpin_engine(&eb);
err_context:
	i915_gem_context_put(eb.gem_context);
err_destroy:
	eb_destroy(&eb);
err_out_fence:
	if (out_fence_fd != -1)
		put_unused_fd(out_fence_fd);
err_exec_fence:
	dma_fence_put(exec_fence);
err_in_fence:
	dma_fence_put(in_fence);
	return err;
}
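/*
 * For illustration: a userspace sketch of the in/out fence ABI handled
 * above. The lower 32 bits of rsvd2 carry the input sync_file fd; on
 * success the new output fd is written back into the upper 32 bits,
 * which requires the _WR flavour of the ioctl (fd values hypothetical):
 *
 *	execbuf.flags |= I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
 *	execbuf.rsvd2 = in_fence_fd;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &execbuf);
 *	out_fence_fd = execbuf.rsvd2 >> 32;
 */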
static size_t eb_element_size(void)
{
	return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
}
static bool check_buffer_count(size_t count)
{
	const size_t sz = eb_element_size();

	/*
	 * When using LUT_HANDLE, we impose a limit of INT_MAX for the lookup
	 * array size (see eb_create()). Otherwise, we can accept an array as
	 * large as can be addressed (though use large arrays at your peril)!
	 */

	return !(count < 1 || count > INT_MAX || count > SIZE_MAX / sz - 1);
}
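/*
 * Worked example for the bound above: the execbuffer ioctls allocate
 * count + 1 elements of eb_element_size() (the extra slot is reserved
 * for the command parser), so kvmalloc_array() would overflow unless
 * count <= SIZE_MAX / sz - 1. The INT_MAX cap keeps the handle LUT
 * within the limit imposed by eb_create().
 */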
/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
static int
i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	const size_t count = args->buffer_count;
	unsigned int i;
	int err;

	if (!check_buffer_count(count)) {
		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	err = i915_gem_check_execbuffer(&exec2);
	if (err)
		return err;

	/* Copy in the exec list from userland */
	exec_list = kvmalloc_array(count, sizeof(*exec_list),
				   __GFP_NOWARN | GFP_KERNEL);

	/* Allocate an extra slot for use by the command parser */
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec_list == NULL || exec2_list == NULL) {
		drm_dbg(&i915->drm,
			"Failed to allocate exec list for %d buffers\n",
			args->buffer_count);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -ENOMEM;
	}

	err = copy_from_user(exec_list,
			     u64_to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * count);
	if (err) {
		drm_dbg(&i915->drm, "copy %d exec entries failed %d\n",
			args->buffer_count, err);
		kvfree(exec_list);
		kvfree(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_GEN(to_i915(dev)) < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	err = i915_gem_do_execbuffer(dev, file, &exec2, exec2_list, NULL);
	if (exec2.flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset &
						    PIN_OFFSET_MASK);
			exec2_list[i].offset &= PIN_OFFSET_MASK;
			if (__copy_to_user(&user_exec_list[i].offset,
					   &exec2_list[i].offset,
					   sizeof(user_exec_list[i].offset)))
				break;
		}
	}

	kvfree(exec_list);
	kvfree(exec2_list);
	return err;
}
static int
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list;
	struct drm_syncobj **fences = NULL;
	const size_t count = args->buffer_count;
	int err;

	if (!check_buffer_count(count)) {
		drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
		return -EINVAL;
	}

	err = i915_gem_check_execbuffer(args);
	if (err)
		return err;

	/* Allocate an extra slot for use by the command parser */
	exec2_list = kvmalloc_array(count + 1, eb_element_size(),
				    __GFP_NOWARN | GFP_KERNEL);
	if (exec2_list == NULL) {
		drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
			count);
		return -ENOMEM;
	}
	if (copy_from_user(exec2_list,
			   u64_to_user_ptr(args->buffers_ptr),
			   sizeof(*exec2_list) * count)) {
		drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count);
		kvfree(exec2_list);
		return -EFAULT;
	}

	if (args->flags & I915_EXEC_FENCE_ARRAY) {
		fences = get_fence_array(args, file);
		if (IS_ERR(fences)) {
			kvfree(exec2_list);
			return PTR_ERR(fences);
		}
	}

	err = i915_gem_do_execbuffer(dev, file, args, exec2_list, fences);

	/*
	 * Now that we have begun execution of the batchbuffer, we ignore
	 * any new error after this point. Also given that we have already
	 * updated the associated relocations, we try to write out the current
	 * object locations irrespective of any error.
	 */
	if (args->flags & __EXEC_HAS_RELOC) {
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
			u64_to_user_ptr(args->buffers_ptr);
		unsigned int i;

		/* Copy the new buffer offsets back to the user's exec list. */
		/*
		 * Note: count * sizeof(*user_exec_list) does not overflow,
		 * because we checked 'count' in check_buffer_count().
		 *
		 * And this range already got effectively checked earlier
		 * when we did the "copy_from_user()" above.
		 */
		if (!user_access_begin(user_exec_list,
				       count * sizeof(*user_exec_list)))
			goto end;

		for (i = 0; i < args->buffer_count; i++) {
			if (!(exec2_list[i].offset & UPDATE))
				continue;

			exec2_list[i].offset =
				gen8_canonical_addr(exec2_list[i].offset &
						    PIN_OFFSET_MASK);
			unsafe_put_user(exec2_list[i].offset,
					&user_exec_list[i].offset,
					end_user);
		}
end_user:
		user_access_end();
end:;
	}

	args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
	put_fence_array(args, fences);
	kvfree(exec2_list);
	return err;
}