2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
23 * Authors:
24 * Ben Widawsky <ben@bwidawsk.net>
25 * Michel Thierry <michel.thierry@intel.com>
26 * Thomas Daniel <thomas.daniel@intel.com>
27 * Oscar Mateo <oscar.mateo@intel.com>
32 * DOC: Logical Rings, Logical Ring Contexts and Execlists
35 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
36 * These expanded contexts enable a number of new abilities, especially
37 * "Execlists" (also implemented in this file).
39 * One of the main differences from the legacy HW contexts is that logical
40 * ring contexts incorporate many more things into the context's state, like
41 * PDPs or ringbuffer control registers:
43 * The reason why PDPs are included in the context is straightforward: as
44 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
45 * contained there means you don't need to do a ppgtt->switch_mm yourself;
46 * instead, the GPU will do it for you on the context switch.
48 * But what about the ringbuffer control registers (head, tail, etc.)?
49 * Shouldn't we just need a set of those per engine command streamer? This is
50 * where the name "Logical Rings" starts to make sense: by virtualizing the
51 * rings, the engine cs shifts to a new "ring buffer" with every context
52 * switch. When you want to submit a workload to the GPU you: A) choose your
53 * context, B) find its appropriate virtualized ring, C) write commands to it
54 * and then, finally, D) tell the GPU to switch to that context.
56 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
57 * to a context is via a context execution list, ergo "Execlists".
60 * Regarding the creation of contexts, we have:
62 * - One global default context.
63 * - One local default context for each opened fd.
64 * - One local extra context for each context create ioctl call.
66 * Now that ringbuffers belong per-context (and not per-engine, like before)
67 * and that contexts are uniquely tied to a given engine (and not reusable,
68 * like before) we need:
70 * - One ringbuffer per-engine inside each context.
71 * - One backing object per-engine inside each context.
73 * The global default context starts its life with these new objects fully
74 * allocated and populated. The local default context for each opened fd is
75 * more complex, because we don't know at creation time which engine is going
76 * to use it. To handle this, we have implemented a deferred creation of LR
77 * contexts:
79 * The local context starts its life as a hollow or blank holder that only
80 * gets populated for a given engine once we receive an execbuffer. If later
81 * on we receive another execbuffer ioctl for the same context but a different
82 * engine, we allocate/populate a new ringbuffer and context backing object and
83 * so on.
85 * Finally, regarding local contexts created using the ioctl call: as they are
86 * only allowed with the render ring, we can allocate & populate them right
87 * away (no need to defer anything, at least for now).
89 * Execlists implementation:
90 * Execlists are the new method by which, on gen8+ hardware, workloads are
91 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
92 * This method works as follows:
94 * When a request is committed, its commands (the BB start and any leading or
95 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
96 * for the appropriate context. The tail pointer in the hardware context is not
97 * updated at this time, but instead, kept by the driver in the ringbuffer
98 * structure. A structure representing this request is added to a request queue
99 * for the appropriate engine: this structure contains a copy of the context's
100 * tail after the request was written to the ring buffer and a pointer to the
101 * context itself.
103 * If the engine's request queue was empty before the request was added, the
104 * queue is processed immediately. Otherwise the queue will be processed during
105 * a context switch interrupt. In any case, elements on the queue will get sent
106 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
107 * globally unique 20-bit submission ID.
109 * When execution of a request completes, the GPU updates the context status
110 * buffer with a context complete event and generates a context switch interrupt.
111 * During the interrupt handling, the driver examines the events in the buffer:
112 * for each context complete event, if the announced ID matches that on the head
113 * of the request queue, then that request is retired and removed from the queue.
115 * After processing, if any requests were retired and the queue is not empty
116 * then a new execution list can be submitted. The two requests at the front of
117 * the queue are next to be submitted but since a context may not occur twice in
118 * an execution list, if subsequent requests have the same ID as the first then
119 * the two requests must be combined. This is done simply by discarding requests
120 * at the head of the queue until either only one request is left (in which case
121 * we use a NULL second context) or the first two requests have unique IDs.
123 * By always executing the first two requests in the queue the driver ensures
124 * that the GPU is kept as busy as possible. In the case where a single context
125 * completes but a second context is still executing, the request for this second
126 * context will be at the head of the queue when we remove the first one. This
127 * request will then be resubmitted along with a new request for a different context,
128 * which will cause the hardware to continue executing the second request and queue
129 * the new request (the GPU detects the condition of a context getting preempted
130 * with the same context and optimizes the context switch flow by not doing
131 * preemption, but just sampling the new tail pointer).
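 *
 * As a rough, purely illustrative sketch of the pairing rule described
 * above (not actual driver code; queue[], drop() and same_context() are
 * made-up helpers):
 *
 *	while (queue has at least two requests &&
 *	       same_context(queue[0], queue[1]))
 *		drop(queue[0]);	/* queue[1]'s tail supersedes it */
 *	elsp[0] = queue[0];
 *	elsp[1] = queue[1];	/* NULL if only one request remains */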
134 #include <linux/interrupt.h>
136 #include <drm/drmP.h>
137 #include <drm/i915_drm.h>
138 #include "i915_drv.h"
139 #include "i915_gem_render_state.h"
140 #include "intel_lrc_reg.h"
141 #include "intel_mocs.h"
142 #include "intel_workarounds.h"
144 #define RING_EXECLIST_QFULL (1 << 0x2)
145 #define RING_EXECLIST1_VALID (1 << 0x3)
146 #define RING_EXECLIST0_VALID (1 << 0x4)
147 #define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE)
148 #define RING_EXECLIST1_ACTIVE (1 << 0x11)
149 #define RING_EXECLIST0_ACTIVE (1 << 0x12)
151 #define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0)
152 #define GEN8_CTX_STATUS_PREEMPTED (1 << 1)
153 #define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2)
154 #define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3)
155 #define GEN8_CTX_STATUS_COMPLETE (1 << 4)
156 #define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15)
158 #define GEN8_CTX_STATUS_COMPLETED_MASK \
159 (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
161 /* Typical size of the average request (2 pipecontrols and a MI_BB) */
162 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
163 #define WA_TAIL_DWORDS 2
164 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
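/*
 * The WA tail dwords are the padding NOOPs emitted after each request so
 * that, when a request is resubmitted for WaIdleLiteRestore, RING_HEAD does
 * not equal RING_TAIL; see the WaIdleLiteRestore handling in
 * execlists_dequeue() and unwind_wa_tail() below.
 */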
166 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
167 struct intel_engine_cs *engine);
168 static void execlists_init_reg_state(u32 *reg_state,
169 struct i915_gem_context *ctx,
170 struct intel_engine_cs *engine,
171 struct intel_ring *ring);
173 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
175 return rb_entry(rb, struct i915_priolist, node);
178 static inline int rq_prio(const struct i915_request *rq)
180 return rq->sched.attr.priority;
183 static inline bool need_preempt(const struct intel_engine_cs *engine,
184 const struct i915_request *last,
187 return (intel_engine_has_preemption(engine) &&
188 __execlists_need_preempt(prio, rq_prio(last)) &&
189 !i915_request_completed(last));
193 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
194 * for a pinned context
195 * @ctx: Context to work on
196 * @engine: Engine the descriptor will be used with
198 * The context descriptor encodes various attributes of a context,
199 * including its GTT address and some flags. Because it's fairly
200 * expensive to calculate, we'll just do it once and cache the result,
201 * which remains valid until the context is unpinned.
203 * This is what a descriptor looks like, from LSB to MSB::
205 * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
206 * bits 12-31: LRCA, GTT address of (the HWSP of) this context
207 * bits 32-52: ctx ID, a globally unique tag
208 * bits 53-54: mbz, reserved for use by hardware
209 * bits 55-63: group ID, currently unused and set to 0
211 * Starting from Gen11, the upper dword of the descriptor has a new format:
213 * bits 32-36: reserved
214 * bits 37-47: SW context ID
215 * bits 48-53: engine instance
216 * bit 54: mbz, reserved for use by hardware
217 * bits 55-60: SW counter
218 * bits 61-63: engine class
220 * Engine info, SW context ID and SW counter need to form a unique number
221 * (Context ID) per lrc.
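 *
 * As a hedged, made-up example (values chosen purely for illustration): a
 * Gen8 descriptor for a context with hw_id 5, flags 0x019 and an LRCA of
 * 0x00012000 would be assembled roughly as
 *
 *	desc = 0x019 | 0x00012000 | ((u64)5 << GEN8_CTX_ID_SHIFT);
 *
 * mirroring the bit layout listed above.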
224 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
225 struct intel_engine_cs *engine)
227 struct intel_context *ce = to_intel_context(ctx, engine);
230 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
231 BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH)));
233 desc = ctx->desc_template; /* bits 0-11 */
234 GEM_BUG_ON(desc & GENMASK_ULL(63, 12));
236 desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
238 GEM_BUG_ON(desc & GENMASK_ULL(63, 32));
240 if (INTEL_GEN(ctx->i915) >= 11) {
241 GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
242 desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
245 desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
248 /* TODO: decide what to do with SW counter (bits 55-60) */
250 desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
253 GEM_BUG_ON(ctx->hw_id >= BIT(GEN8_CTX_ID_WIDTH));
254 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
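/*
 * lookup_priolist() finds (or lazily allocates) the i915_priolist bucket
 * for the requested priority in the engine's rbtree of pending requests.
 * Note the GFP_ATOMIC fallback: if the allocation fails we fall back to
 * I915_PRIORITY_NORMAL and disable priority sorting (execlists->no_priolist)
 * rather than lose the request.
 */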
260 static struct i915_priolist *
261 lookup_priolist(struct intel_engine_cs *engine, int prio)
263 struct intel_engine_execlists * const execlists = &engine->execlists;
264 struct i915_priolist *p;
265 struct rb_node **parent, *rb;
268 if (unlikely(execlists->no_priolist))
269 prio = I915_PRIORITY_NORMAL;
272 /* most positive priority is scheduled first, equal priorities fifo */
274 parent = &execlists->queue.rb_node;
278 if (prio > p->priority) {
279 parent = &rb->rb_left;
280 } else if (prio < p->priority) {
281 parent = &rb->rb_right;
288 if (prio == I915_PRIORITY_NORMAL) {
289 p = &execlists->default_priolist;
291 p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
292 /* Convert an allocation failure to a priority bump */
294 prio = I915_PRIORITY_NORMAL; /* recurses just once */
296 /* To maintain ordering with all rendering, after an
297 * allocation failure we have to disable all scheduling.
298 * Requests will then be executed in fifo, and schedule
299 * will ensure that dependencies are emitted in fifo.
300 * There will still be some reordering with existing
301 * requests, so if userspace lied about their
302 * dependencies that reordering may be visible.
304 execlists->no_priolist = true;
310 INIT_LIST_HEAD(&p->requests);
311 rb_link_node(&p->node, rb, parent);
312 rb_insert_color(&p->node, &execlists->queue);
315 execlists->first = &p->node;
320 static void unwind_wa_tail(struct i915_request *rq)
322 rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
323 assert_ring_tail_valid(rq->ring, rq->tail);
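/*
 * __unwind_incomplete_requests() walks the engine timeline backwards,
 * unsubmitting every request that has not yet completed and returning it
 * to the priolist for its priority, ready to be resubmitted after a
 * preemption or reset. Caller must hold engine->timeline.lock.
 */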
326 static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
328 struct i915_request *rq, *rn;
329 struct i915_priolist *uninitialized_var(p);
330 int last_prio = I915_PRIORITY_INVALID;
332 lockdep_assert_held(&engine->timeline.lock);
334 list_for_each_entry_safe_reverse(rq, rn,
335 &engine->timeline.requests,
337 if (i915_request_completed(rq))
340 __i915_request_unsubmit(rq);
343 GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
344 if (rq_prio(rq) != last_prio) {
345 last_prio = rq_prio(rq);
346 p = lookup_priolist(engine, last_prio);
349 GEM_BUG_ON(p->priority != rq_prio(rq));
350 list_add(&rq->sched.link, &p->requests);
355 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
357 struct intel_engine_cs *engine =
358 container_of(execlists, typeof(*engine), execlists);
360 spin_lock_irq(&engine->timeline.lock);
361 __unwind_incomplete_requests(engine);
362 spin_unlock_irq(&engine->timeline.lock);
366 execlists_context_status_change(struct i915_request *rq, unsigned long status)
369 * Only used when GVT-g is enabled now. When GVT-g is disabled,
370 * the compiler should eliminate this function as dead code.
372 if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
375 atomic_notifier_call_chain(&rq->engine->context_status_notifier,
380 execlists_user_begin(struct intel_engine_execlists *execlists,
381 const struct execlist_port *port)
383 execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER);
387 execlists_user_end(struct intel_engine_execlists *execlists)
389 execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
393 execlists_context_schedule_in(struct i915_request *rq)
395 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
396 intel_engine_context_in(rq->engine);
400 execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
402 intel_engine_context_out(rq->engine);
403 execlists_context_status_change(rq, status);
404 trace_i915_request_out(rq);
408 execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
410 ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
411 ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
412 ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
413 ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
416 static u64 execlists_update_context(struct i915_request *rq)
418 struct intel_context *ce = to_intel_context(rq->ctx, rq->engine);
419 struct i915_hw_ppgtt *ppgtt =
420 rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
421 u32 *reg_state = ce->lrc_reg_state;
423 reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
425 /* True 32b PPGTT with dynamic page allocation: update PDP
426 * registers and point the unallocated PDPs to scratch page.
427 * PML4 is allocated during ppgtt init, so this is not needed
428 * in 48-bit mode.
430 if (ppgtt && !i915_vm_is_48bit(&ppgtt->base))
431 execlists_update_context_pdps(ppgtt, reg_state);
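/*
 * write_desc() posts one context descriptor to the hardware: on parts with
 * the execlists control register (ELSQ) each port gets a pair of 32-bit
 * writes, otherwise the descriptor is written to the single ELSP register,
 * upper dword first, as the hardware expects.
 */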
436 static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
438 if (execlists->ctrl_reg) {
439 writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
440 writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
442 writel(upper_32_bits(desc), execlists->submit_reg);
443 writel(lower_32_bits(desc), execlists->submit_reg);
447 static void execlists_submit_ports(struct intel_engine_cs *engine)
449 struct intel_engine_execlists *execlists = &engine->execlists;
450 struct execlist_port *port = execlists->port;
454 * ELSQ note: the submit queue is not cleared after being submitted
455 * to the HW so we need to make sure we always clean it up. This is
456 * currently ensured by the fact that we always write the same number
457 * of elsq entries, keep this in mind before changing the loop below.
459 for (n = execlists_num_ports(execlists); n--; ) {
460 struct i915_request *rq;
464 rq = port_unpack(&port[n], &count);
466 GEM_BUG_ON(count > !n);
468 execlists_context_schedule_in(rq);
469 port_set(&port[n], port_pack(rq, count));
470 desc = execlists_update_context(rq);
471 GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
473 GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
475 port[n].context_id, count,
477 rq->fence.context, rq->fence.seqno,
478 intel_engine_get_seqno(engine),
485 write_desc(execlists, desc, n);
488 /* we need to manually load the submit queue */
489 if (execlists->ctrl_reg)
490 writel(EL_CTRL_LOAD, execlists->ctrl_reg);
492 execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
495 static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
497 return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
498 i915_gem_context_force_single_submission(ctx));
501 static bool can_merge_ctx(const struct i915_gem_context *prev,
502 const struct i915_gem_context *next)
507 if (ctx_single_port_submission(prev))
513 static void port_assign(struct execlist_port *port, struct i915_request *rq)
515 GEM_BUG_ON(rq == port_request(port));
517 if (port_isset(port))
518 i915_request_put(port_request(port));
520 port_set(port, port_pack(i915_request_get(rq), port_count(port)));
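/*
 * inject_preempt_context() writes the kernel's empty preempt context to
 * port 0 (and zero descriptors to the remaining ports), forcing the GPU to
 * switch away from whatever is running. Once the resulting completion event
 * is seen in the CSB, the incomplete requests are unwound and resubmitted
 * in priority order.
 */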
523 static void inject_preempt_context(struct intel_engine_cs *engine)
525 struct intel_engine_execlists *execlists = &engine->execlists;
526 struct intel_context *ce =
527 to_intel_context(engine->i915->preempt_context, engine);
530 GEM_BUG_ON(execlists->preempt_complete_status !=
531 upper_32_bits(ce->lrc_desc));
532 GEM_BUG_ON((ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1] &
533 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
534 CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)) !=
535 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
536 CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT));
539 * Switch to our empty preempt context so
540 * the state of the GPU is known (idle).
542 GEM_TRACE("%s\n", engine->name);
543 for (n = execlists_num_ports(execlists); --n; )
544 write_desc(execlists, 0, n);
546 write_desc(execlists, ce->lrc_desc, n);
548 /* we need to manually load the submit queue */
549 if (execlists->ctrl_reg)
550 writel(EL_CTRL_LOAD, execlists->ctrl_reg);
552 execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
553 execlists_set_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT);
556 static void execlists_dequeue(struct intel_engine_cs *engine)
558 struct intel_engine_execlists * const execlists = &engine->execlists;
559 struct execlist_port *port = execlists->port;
560 const struct execlist_port * const last_port =
561 &execlists->port[execlists->port_mask];
562 struct i915_request *last = port_request(port);
566 /* Hardware submission is through 2 ports. Conceptually each port
567 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
568 * static for a context, and unique to each, so we only execute
569 * requests belonging to a single context from each ring. RING_HEAD
570 * is maintained by the CS in the context image, it marks the place
571 * where it got up to last time, and through RING_TAIL we tell the CS
572 * where we want to execute up to this time.
574 * In this list the requests are in order of execution. Consecutive
575 * requests from the same context are adjacent in the ringbuffer. We
576 * can combine these requests into a single RING_TAIL update:
578 * RING_HEAD...req1...req2
580 * since to execute req2 the CS must first execute req1.
582 * Our goal then is to point each port to the end of a consecutive
583 * sequence of requests as being the most optimal (fewest wake ups
584 * and context switches) submission.
587 spin_lock_irq(&engine->timeline.lock);
588 rb = execlists->first;
589 GEM_BUG_ON(rb_first(&execlists->queue) != rb);
593 * Don't resubmit or switch until all outstanding
594 * preemptions (lite-restore) are seen. Then we
595 * know the next preemption status we see corresponds
596 * to this ELSP update.
598 GEM_BUG_ON(!execlists_is_active(execlists,
599 EXECLISTS_ACTIVE_USER));
600 GEM_BUG_ON(!port_count(&port[0]));
601 if (port_count(&port[0]) > 1)
605 * If we write to ELSP a second time before the HW has had
606 * a chance to respond to the previous write, we can confuse
607 * the HW and hit "undefined behaviour". After writing to ELSP,
608 * we must then wait until we see a context-switch event from
609 * the HW to indicate that it has had a chance to respond.
611 if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
614 if (need_preempt(engine, last, execlists->queue_priority)) {
615 inject_preempt_context(engine);
620 * In theory, we could coalesce more requests onto
621 * the second port (the first port is active, with
622 * no preemptions pending). However, that means we
623 * then have to deal with the possible lite-restore
624 * of the second port (as we submit the ELSP, there
625 * may be a context-switch) but also we may complete
626 * the resubmission before the context-switch. Ergo,
627 * coalescing onto the second port will cause a
628 * preemption event, but we cannot predict whether
629 * that will affect port[0] or port[1].
631 * If the second port is already active, we can wait
632 * until the next context-switch before contemplating
633 * new requests. The GPU will be busy and we should be
634 * able to resubmit the new ELSP before it idles,
635 * avoiding pipeline bubbles (momentary pauses where
636 * the driver is unable to keep up the supply of new
637 * work). However, we have to double check that the
638 * priorities of the ports haven't been switched.
640 if (port_count(&port[1]))
644 * WaIdleLiteRestore:bdw,skl
645 * Apply the wa NOOPs to prevent
646 * ring:HEAD == rq:TAIL as we resubmit the
647 * request. See gen8_emit_breadcrumb() for
648 * where we prepare the padding after the
649 * end of the request.
651 last->tail = last->wa_tail;
655 struct i915_priolist *p = to_priolist(rb);
656 struct i915_request *rq, *rn;
658 list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
660 * Can we combine this request with the current port?
661 * It has to be the same context/ringbuffer and not
662 * have any exceptions (e.g. GVT saying never to
665 * If we can combine the requests, we can execute both
666 * by updating the RING_TAIL to point to the end of the
667 * second request, and so we never need to tell the
668 * hardware about the first.
670 if (last && !can_merge_ctx(rq->ctx, last->ctx)) {
672 * If we are on the second port and cannot
673 * combine this request with the last, then we
674 * are done.
676 if (port == last_port) {
677 __list_del_many(&p->requests,
683 * If GVT overrides us we only ever submit
684 * port[0], leaving port[1] empty. Note that we
685 * also have to be careful that we don't queue
686 * the same context (even though a different
687 * request) to the second port.
689 if (ctx_single_port_submission(last->ctx) ||
690 ctx_single_port_submission(rq->ctx)) {
691 __list_del_many(&p->requests,
696 GEM_BUG_ON(last->ctx == rq->ctx);
699 port_assign(port, last);
702 GEM_BUG_ON(port_isset(port));
705 INIT_LIST_HEAD(&rq->sched.link);
706 __i915_request_submit(rq);
707 trace_i915_request_in(rq, port_index(port, execlists));
713 rb_erase(&p->node, &execlists->queue);
714 INIT_LIST_HEAD(&p->requests);
715 if (p->priority != I915_PRIORITY_NORMAL)
716 kmem_cache_free(engine->i915->priorities, p);
721 * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
723 * We choose queue_priority such that if we add a request of greater
724 * priority than this, we kick the submission tasklet to decide on
725 * the right order of submitting the requests to hardware. We must
726 * also be prepared to reorder requests as they are in-flight on the
727 * HW. We derive the queue_priority then as the first "hole" in
728 * the HW submission ports and if there are no available slots,
729 * the priority of the lowest executing request, i.e. last.
731 * When we do receive a higher priority request ready to run from the
732 * user, see queue_request(), the queue_priority is bumped to that
733 * request triggering preemption on the next dequeue (or subsequent
734 * interrupt for secondary ports).
736 execlists->queue_priority =
737 port != execlists->port ? rq_prio(last) : INT_MIN;
739 execlists->first = rb;
741 port_assign(port, last);
743 /* We must always keep the beast fed if we have work piled up */
744 GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
747 spin_unlock_irq(&engine->timeline.lock);
750 execlists_user_begin(execlists, execlists->port);
751 execlists_submit_ports(engine);
754 GEM_BUG_ON(port_isset(execlists->port) &&
755 !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
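/*
 * execlists_cancel_port_requests() releases the driver's reference to every
 * request still tracked in the ELSP ports (for example across a reset or
 * while wedging), emitting the schedule-out notification for each before
 * clearing the port.
 */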
759 execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
761 struct execlist_port *port = execlists->port;
762 unsigned int num_ports = execlists_num_ports(execlists);
764 while (num_ports-- && port_isset(port)) {
765 struct i915_request *rq = port_request(port);
767 GEM_TRACE("%s:port%u global=%d (fence %llx:%d), (current %d)\n",
769 (unsigned int)(port - execlists->port),
771 rq->fence.context, rq->fence.seqno,
772 intel_engine_get_seqno(rq->engine));
774 GEM_BUG_ON(!execlists->active);
775 execlists_context_schedule_out(rq,
776 i915_request_completed(rq) ?
777 INTEL_CONTEXT_SCHEDULE_OUT :
778 INTEL_CONTEXT_SCHEDULE_PREEMPTED);
780 i915_request_put(rq);
782 memset(port, 0, sizeof(*port));
786 execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
787 execlists_user_end(execlists);
790 static void clear_gtiir(struct intel_engine_cs *engine)
792 struct drm_i915_private *dev_priv = engine->i915;
796 * Clear any pending interrupt state.
798 * We do it twice out of paranoia that some of the IIR are
799 * double buffered, and so if we only reset it once there may
800 * still be an interrupt pending.
802 if (INTEL_GEN(dev_priv) >= 11) {
803 static const struct {
807 [RCS] = {0, GEN11_RCS0},
808 [BCS] = {0, GEN11_BCS},
809 [_VCS(0)] = {1, GEN11_VCS(0)},
810 [_VCS(1)] = {1, GEN11_VCS(1)},
811 [_VCS(2)] = {1, GEN11_VCS(2)},
812 [_VCS(3)] = {1, GEN11_VCS(3)},
813 [_VECS(0)] = {1, GEN11_VECS(0)},
814 [_VECS(1)] = {1, GEN11_VECS(1)},
816 unsigned long irqflags;
818 GEM_BUG_ON(engine->id >= ARRAY_SIZE(gen11_gtiir));
820 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
821 for (i = 0; i < 2; i++) {
822 gen11_reset_one_iir(dev_priv,
823 gen11_gtiir[engine->id].bank,
824 gen11_gtiir[engine->id].bit);
826 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
828 static const u8 gtiir[] = {
836 GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
838 for (i = 0; i < 2; i++) {
839 I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
840 engine->irq_keep_mask);
841 POSTING_READ(GEN8_GT_IIR(gtiir[engine->id]));
843 GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) &
844 engine->irq_keep_mask);
848 static void reset_irq(struct intel_engine_cs *engine)
850 /* Mark all CS interrupts as complete */
851 smp_store_mb(engine->execlists.active, 0);
852 synchronize_hardirq(engine->i915->drm.irq);
857 * The port is checked prior to scheduling a tasklet, but
858 * just in case we have suspended the tasklet to do the
859 * wedging, make sure that when it wakes, it decides there
860 * is no work to do by clearing the irq_posted bit.
862 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
865 static void execlists_cancel_requests(struct intel_engine_cs *engine)
867 struct intel_engine_execlists * const execlists = &engine->execlists;
868 struct i915_request *rq, *rn;
872 GEM_TRACE("%s current %d\n",
873 engine->name, intel_engine_get_seqno(engine));
876 * Before we call engine->cancel_requests(), we should have exclusive
877 * access to the submission state. This is arranged for us by the
878 * caller disabling the interrupt generation, the tasklet and other
879 * threads that may then access the same state, giving us a free hand
880 * to reset state. However, we still need to let lockdep be aware that
881 * we know this state may be accessed in hardirq context, so we
882 * disable the irq around this manipulation and we want to keep
883 * the spinlock focused on its duties and not accidentally conflate
884 * coverage to the submission's irq state. (Similarly, although we
885 * shouldn't need to disable irq around the manipulation of the
886 * submission's irq state, we also wish to remind ourselves that
887 * it is irq state.)
889 local_irq_save(flags);
891 /* Cancel the requests on the HW and clear the ELSP tracker. */
892 execlists_cancel_port_requests(execlists);
895 spin_lock(&engine->timeline.lock);
897 /* Mark all executing requests as skipped. */
898 list_for_each_entry(rq, &engine->timeline.requests, link) {
899 GEM_BUG_ON(!rq->global_seqno);
900 if (!i915_request_completed(rq))
901 dma_fence_set_error(&rq->fence, -EIO);
904 /* Flush the queued requests to the timeline list (for retiring). */
905 rb = execlists->first;
907 struct i915_priolist *p = to_priolist(rb);
909 list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
910 INIT_LIST_HEAD(&rq->sched.link);
912 dma_fence_set_error(&rq->fence, -EIO);
913 __i915_request_submit(rq);
917 rb_erase(&p->node, &execlists->queue);
918 INIT_LIST_HEAD(&p->requests);
919 if (p->priority != I915_PRIORITY_NORMAL)
920 kmem_cache_free(engine->i915->priorities, p);
923 /* Remaining _unready_ requests will be nop'ed when submitted */
925 execlists->queue_priority = INT_MIN;
926 execlists->queue = RB_ROOT;
927 execlists->first = NULL;
928 GEM_BUG_ON(port_isset(execlists->port));
930 spin_unlock(&engine->timeline.lock);
932 local_irq_restore(flags);
936 * Check the unread Context Status Buffers and manage the submission of new
937 * contexts to the ELSP accordingly.
939 static void execlists_submission_tasklet(unsigned long data)
941 struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
942 struct intel_engine_execlists * const execlists = &engine->execlists;
943 struct execlist_port *port = execlists->port;
944 struct drm_i915_private *dev_priv = engine->i915;
948 * We can skip acquiring intel_runtime_pm_get() here as it was taken
949 * on our behalf by the request (see i915_gem_mark_busy()) and it will
950 * not be relinquished until the device is idle (see
951 * i915_gem_idle_work_handler()). As a precaution, we make sure
952 * that all ELSP are drained i.e. we have processed the CSB,
953 * before allowing ourselves to idle and calling intel_runtime_pm_put().
955 GEM_BUG_ON(!dev_priv->gt.awake);
958 * Prefer doing test_and_clear_bit() as a two stage operation to avoid
959 * imposing the cost of a locked atomic transaction when submitting a
960 * new request (outside of the context-switch interrupt).
962 while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
963 /* The HWSP contains a (cacheable) mirror of the CSB */
965 &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
966 unsigned int head, tail;
968 if (unlikely(execlists->csb_use_mmio)) {
969 buf = (u32 * __force)
970 (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
971 execlists->csb_head = -1; /* force mmio read of CSB ptrs */
974 /* Clear before reading to catch new interrupts */
975 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
976 smp_mb__after_atomic();
978 if (unlikely(execlists->csb_head == -1)) { /* following a reset */
980 intel_uncore_forcewake_get(dev_priv,
981 execlists->fw_domains);
985 head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
986 tail = GEN8_CSB_WRITE_PTR(head);
987 head = GEN8_CSB_READ_PTR(head);
988 execlists->csb_head = head;
990 const int write_idx =
991 intel_hws_csb_write_index(dev_priv) -
992 I915_HWS_CSB_BUF0_INDEX;
994 head = execlists->csb_head;
995 tail = READ_ONCE(buf[write_idx]);
997 GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n",
999 head, GEN8_CSB_READ_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?",
1000 tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
1002 while (head != tail) {
1003 struct i915_request *rq;
1004 unsigned int status;
1007 if (++head == GEN8_CSB_ENTRIES)
1010 /* We are flying near dragons again.
1012 * We hold a reference to the request in execlist_port[]
1013 * but no more than that. We are operating in softirq
1014 * context and so cannot hold any mutex or sleep. That
1015 * prevents us stopping the requests we are processing
1016 * in port[] from being retired simultaneously (the
1017 * breadcrumb will be complete before we see the
1018 * context-switch). As we only hold the reference to the
1019 * request, any pointer chasing underneath the request
1020 * is subject to a potential use-after-free. Thus we
1021 * store all of the bookkeeping within port[] as
1022 * required, and avoid using unguarded pointers beneath
1023 * request itself. The same applies to the atomic
1024 * status notifier.
1027 status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
1028 GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
1030 status, buf[2*head + 1],
1033 if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
1034 GEN8_CTX_STATUS_PREEMPTED))
1035 execlists_set_active(execlists,
1036 EXECLISTS_ACTIVE_HWACK);
1037 if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
1038 execlists_clear_active(execlists,
1039 EXECLISTS_ACTIVE_HWACK);
1041 if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
1044 /* We should never get a COMPLETED | IDLE_ACTIVE! */
1045 GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
1047 if (status & GEN8_CTX_STATUS_COMPLETE &&
1048 buf[2*head + 1] == execlists->preempt_complete_status) {
1049 GEM_TRACE("%s preempt-idle\n", engine->name);
1051 execlists_cancel_port_requests(execlists);
1052 execlists_unwind_incomplete_requests(execlists);
1054 GEM_BUG_ON(!execlists_is_active(execlists,
1055 EXECLISTS_ACTIVE_PREEMPT));
1056 execlists_clear_active(execlists,
1057 EXECLISTS_ACTIVE_PREEMPT);
1061 if (status & GEN8_CTX_STATUS_PREEMPTED &&
1062 execlists_is_active(execlists,
1063 EXECLISTS_ACTIVE_PREEMPT))
1066 GEM_BUG_ON(!execlists_is_active(execlists,
1067 EXECLISTS_ACTIVE_USER));
1069 rq = port_unpack(port, &count);
1070 GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
1072 port->context_id, count,
1073 rq ? rq->global_seqno : 0,
1074 rq ? rq->fence.context : 0,
1075 rq ? rq->fence.seqno : 0,
1076 intel_engine_get_seqno(engine),
1077 rq ? rq_prio(rq) : 0);
1079 /* Check the context/desc id for this event matches */
1080 GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
1082 GEM_BUG_ON(count == 0);
1085 * On the final event corresponding to the
1086 * submission of this context, we expect either
1087 * an element-switch event or a completion
1088 * event (and on completion, the active-idle
1089 * marker). No more preemptions, lite-restore
1090 * or otherwise.
1092 GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
1093 GEM_BUG_ON(port_isset(&port[1]) &&
1094 !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
1095 GEM_BUG_ON(!port_isset(&port[1]) &&
1096 !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
1099 * We rely on the hardware being strongly
1100 * ordered, that the breadcrumb write is
1101 * coherent (visible from the CPU) before the
1102 * user interrupt and CSB is processed.
1104 GEM_BUG_ON(!i915_request_completed(rq));
1106 execlists_context_schedule_out(rq,
1107 INTEL_CONTEXT_SCHEDULE_OUT);
1108 i915_request_put(rq);
1110 GEM_TRACE("%s completed ctx=%d\n",
1111 engine->name, port->context_id);
1113 port = execlists_port_complete(execlists, port);
1114 if (port_isset(port))
1115 execlists_user_begin(execlists, port);
1117 execlists_user_end(execlists);
1119 port_set(port, port_pack(rq, count));
1123 if (head != execlists->csb_head) {
1124 execlists->csb_head = head;
1125 writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
1126 dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
1130 if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
1131 execlists_dequeue(engine);
1134 intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
1136 /* If the engine is now idle, so should be the flag; and vice versa. */
1137 GEM_BUG_ON(execlists_is_active(&engine->execlists,
1138 EXECLISTS_ACTIVE_USER) ==
1139 !port_isset(engine->execlists.port));
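/*
 * queue_request() and submit_queue() form the producer side of the
 * scheduler: the request is appended to the priolist for its priority and,
 * if that priority beats the current queue_priority, the submission tasklet
 * is kicked so that execlists_dequeue() can reconsider the ELSP (and
 * possibly trigger preemption).
 */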
1142 static void queue_request(struct intel_engine_cs *engine,
1143 struct i915_sched_node *node,
1146 list_add_tail(&node->link,
1147 &lookup_priolist(engine, prio)->requests);
1150 static void __submit_queue(struct intel_engine_cs *engine, int prio)
1152 engine->execlists.queue_priority = prio;
1153 tasklet_hi_schedule(&engine->execlists.tasklet);
1156 static void submit_queue(struct intel_engine_cs *engine, int prio)
1158 if (prio > engine->execlists.queue_priority)
1159 __submit_queue(engine, prio);
1162 static void execlists_submit_request(struct i915_request *request)
1164 struct intel_engine_cs *engine = request->engine;
1165 unsigned long flags;
1167 /* Will be called from irq-context when using foreign fences. */
1168 spin_lock_irqsave(&engine->timeline.lock, flags);
1170 queue_request(engine, &request->sched, rq_prio(request));
1171 submit_queue(engine, rq_prio(request));
1173 GEM_BUG_ON(!engine->execlists.first);
1174 GEM_BUG_ON(list_empty(&request->sched.link));
1176 spin_unlock_irqrestore(&engine->timeline.lock, flags);
1179 static struct i915_request *sched_to_request(struct i915_sched_node *node)
1181 return container_of(node, struct i915_request, sched);
1184 static struct intel_engine_cs *
1185 sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
1187 struct intel_engine_cs *engine = sched_to_request(node)->engine;
1189 GEM_BUG_ON(!locked);
1191 if (engine != locked) {
1192 spin_unlock(&locked->timeline.lock);
1193 spin_lock(&engine->timeline.lock);
1199 static void execlists_schedule(struct i915_request *request,
1200 const struct i915_sched_attr *attr)
1202 struct i915_priolist *uninitialized_var(pl);
1203 struct intel_engine_cs *engine, *last;
1204 struct i915_dependency *dep, *p;
1205 struct i915_dependency stack;
1206 const int prio = attr->priority;
1209 GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
1211 if (i915_request_completed(request))
1214 if (prio <= READ_ONCE(request->sched.attr.priority))
1217 /* Need BKL in order to use the temporary link inside i915_dependency */
1218 lockdep_assert_held(&request->i915->drm.struct_mutex);
1220 stack.signaler = &request->sched;
1221 list_add(&stack.dfs_link, &dfs);
1224 * Recursively bump all dependent priorities to match the new request.
1226 * A naive approach would be to use recursion:
1227 * static void update_priorities(struct i915_sched_node *node, prio) {
1228 * list_for_each_entry(dep, &node->signalers_list, signal_link)
1229 * update_priorities(dep->signal, prio)
1230 * queue_request(node);
1232 * but that may have unlimited recursion depth and so runs a very
1233 * real risk of overrunning the kernel stack. Instead, we build
1234 * a flat list of all dependencies starting with the current request.
1235 * As we walk the list of dependencies, we add all of its dependencies
1236 * to the end of the list (this may include an already visited
1237 * request) and continue to walk onwards onto the new dependencies. The
1238 * end result is a topological list of requests in reverse order, the
1239 * last element in the list is the request we must execute first.
1241 list_for_each_entry(dep, &dfs, dfs_link) {
1242 struct i915_sched_node *node = dep->signaler;
1245 * Within an engine, there can be no cycle, but we may
1246 * refer to the same dependency chain multiple times
1247 * (redundant dependencies are not eliminated) and across
1248 * engines.
1250 list_for_each_entry(p, &node->signalers_list, signal_link) {
1251 GEM_BUG_ON(p == dep); /* no cycles! */
1253 if (i915_sched_node_signaled(p->signaler))
1256 GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
1257 if (prio > READ_ONCE(p->signaler->attr.priority))
1258 list_move_tail(&p->dfs_link, &dfs);
1263 * If we didn't need to bump any existing priorities, and we haven't
1264 * yet submitted this request (i.e. there is no potential race with
1265 * execlists_submit_request()), we can set our own priority and skip
1266 * acquiring the engine locks.
1268 if (request->sched.attr.priority == I915_PRIORITY_INVALID) {
1269 GEM_BUG_ON(!list_empty(&request->sched.link));
1270 request->sched.attr = *attr;
1271 if (stack.dfs_link.next == stack.dfs_link.prev)
1273 __list_del_entry(&stack.dfs_link);
1277 engine = request->engine;
1278 spin_lock_irq(&engine->timeline.lock);
1280 /* Fifo and depth-first replacement ensure our deps execute before us */
1281 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
1282 struct i915_sched_node *node = dep->signaler;
1284 INIT_LIST_HEAD(&dep->dfs_link);
1286 engine = sched_lock_engine(node, engine);
1288 if (prio <= node->attr.priority)
1291 node->attr.priority = prio;
1292 if (!list_empty(&node->link)) {
1293 if (last != engine) {
1294 pl = lookup_priolist(engine, prio);
1297 GEM_BUG_ON(pl->priority != prio);
1298 list_move_tail(&node->link, &pl->requests);
1301 if (prio > engine->execlists.queue_priority &&
1302 i915_sw_fence_done(&sched_to_request(node)->submit))
1303 __submit_queue(engine, prio);
1306 spin_unlock_irq(&engine->timeline.lock);
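/*
 * Context pinning: on first pin we allocate the logical ring context (if
 * needed), bind its backing object into the GGTT, map the register state
 * page and pin the ring, so that the context can be described to and
 * executed by the hardware. Later pins simply increment ce->pin_count.
 */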
1309 static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
1315 * Clear this page out of any CPU caches for coherent swap-in/out.
1316 * We only want to do this on the first bind so that we do not stall
1317 * on an active context (which by nature is already on the GPU).
1319 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1320 err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1325 flags = PIN_GLOBAL | PIN_HIGH;
1326 if (ctx->ggtt_offset_bias)
1327 flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
1329 return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
1332 static struct intel_ring *
1333 execlists_context_pin(struct intel_engine_cs *engine,
1334 struct i915_gem_context *ctx)
1336 struct intel_context *ce = to_intel_context(ctx, engine);
1340 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1342 if (likely(ce->pin_count++))
1344 GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
1346 ret = execlists_context_deferred_alloc(ctx, engine);
1349 GEM_BUG_ON(!ce->state);
1351 ret = __context_pin(ctx, ce->state);
1355 vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
1356 if (IS_ERR(vaddr)) {
1357 ret = PTR_ERR(vaddr);
1361 ret = intel_ring_pin(ce->ring, ctx->i915, ctx->ggtt_offset_bias);
1365 intel_lr_context_descriptor_update(ctx, engine);
1367 ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
1368 ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
1369 i915_ggtt_offset(ce->ring->vma);
1370 ce->lrc_reg_state[CTX_RING_HEAD+1] = ce->ring->head;
1372 ce->state->obj->pin_global++;
1373 i915_gem_context_get(ctx);
1378 i915_gem_object_unpin_map(ce->state->obj);
1380 __i915_vma_unpin(ce->state);
1383 return ERR_PTR(ret);
1386 static void execlists_context_unpin(struct intel_engine_cs *engine,
1387 struct i915_gem_context *ctx)
1389 struct intel_context *ce = to_intel_context(ctx, engine);
1391 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1392 GEM_BUG_ON(ce->pin_count == 0);
1394 if (--ce->pin_count)
1397 intel_ring_unpin(ce->ring);
1399 ce->state->obj->pin_global--;
1400 i915_gem_object_unpin_map(ce->state->obj);
1401 i915_vma_unpin(ce->state);
1403 i915_gem_context_put(ctx);
1406 static int execlists_request_alloc(struct i915_request *request)
1408 struct intel_context *ce =
1409 to_intel_context(request->ctx, request->engine);
1412 GEM_BUG_ON(!ce->pin_count);
1414 /* Flush enough space to reduce the likelihood of waiting after
1415 * we start building the request - in which case we will just
1416 * have to repeat work.
1418 request->reserved_space += EXECLISTS_REQUEST_SIZE;
1420 ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
1424 /* Note that after this point, we have committed to using
1425 * this request as it is being used to both track the
1426 * state of engine initialisation and liveness of the
1427 * golden renderstate above. Think twice before you try
1428 * to cancel/unwind this request now.
1431 request->reserved_space -= EXECLISTS_REQUEST_SIZE;
1436 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
1437 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
1438 * but there is a slight complication as this is applied in WA batch where the
1439 * values are only initialized once so we cannot take register value at the
1440 * beginning and reuse it further; hence we save its value to memory, upload a
1441 * constant value with bit21 set and then we restore it back with the saved value.
1442 * To simplify the WA, a constant value is formed by using the default value
1443 * of this register. This shouldn't be a problem because we are only modifying
1444 * it for a short period and this batch is non-preemptible. We can of course
1445 * use additional instructions that read the actual value of the register
1446 * at that time and set our bit of interest but it makes the WA complicated.
1448 * This WA is also required for Gen9 so extracting as a function avoids
1449 * code duplication.
1452 gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
1454 *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
1455 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1456 *batch++ = i915_ggtt_offset(engine->scratch) + 256;
1459 *batch++ = MI_LOAD_REGISTER_IMM(1);
1460 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1461 *batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;
1463 batch = gen8_emit_pipe_control(batch,
1464 PIPE_CONTROL_CS_STALL |
1465 PIPE_CONTROL_DC_FLUSH_ENABLE,
1468 *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
1469 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1470 *batch++ = i915_ggtt_offset(engine->scratch) + 256;
1477 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
1478 * initialized at the beginning and shared across all contexts but this field
1479 * helps us to have multiple batches at different offsets and select them based
1480 * on some criteria. At the moment this batch always starts at the beginning of the page
1481 * and at this point we don't have multiple wa_ctx batch buffers.
1483 * The number of WAs applied is not known at the beginning; we use this field
1484 * to return the number of DWORDS written.
1486 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
1487 * so it adds NOOPs as padding to make it cacheline aligned.
1488 * MI_BATCH_BUFFER_END will be added to perctx batch and both of them together
1489 * makes a complete batch buffer.
1491 static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
1493 /* WaDisableCtxRestoreArbitration:bdw,chv */
1494 *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1496 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1497 if (IS_BROADWELL(engine->i915))
1498 batch = gen8_emit_flush_coherentl3_wa(engine, batch);
1500 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1501 /* Actual scratch location is at 128 bytes offset */
1502 batch = gen8_emit_pipe_control(batch,
1503 PIPE_CONTROL_FLUSH_L3 |
1504 PIPE_CONTROL_GLOBAL_GTT_IVB |
1505 PIPE_CONTROL_CS_STALL |
1506 PIPE_CONTROL_QW_WRITE,
1507 i915_ggtt_offset(engine->scratch) +
1508 2 * CACHELINE_BYTES);
1510 *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1512 /* Pad to end of cacheline */
1513 while ((unsigned long)batch % CACHELINE_BYTES)
1517 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
1518 * execution depends on the length specified in terms of cache lines
1519 * in the register CTX_RCS_INDIRECT_CTX
1525 static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
1527 *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1529 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
1530 batch = gen8_emit_flush_coherentl3_wa(engine, batch);
1532 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
1533 *batch++ = MI_LOAD_REGISTER_IMM(1);
1534 *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
1535 *batch++ = _MASKED_BIT_DISABLE(
1536 GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
1539 /* WaClearSlmSpaceAtContextSwitch:kbl */
1540 /* Actual scratch location is at 128 bytes offset */
1541 if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
1542 batch = gen8_emit_pipe_control(batch,
1543 PIPE_CONTROL_FLUSH_L3 |
1544 PIPE_CONTROL_GLOBAL_GTT_IVB |
1545 PIPE_CONTROL_CS_STALL |
1546 PIPE_CONTROL_QW_WRITE,
1547 i915_ggtt_offset(engine->scratch)
1548 + 2 * CACHELINE_BYTES);
1551 /* WaMediaPoolStateCmdInWABB:bxt,glk */
1552 if (HAS_POOLED_EU(engine->i915)) {
1554 * EU pool configuration is set up along with the golden context
1555 * during context initialization. This value depends on
1556 * device type (2x6 or 3x6) and needs to be updated based
1557 * on which subslice is disabled especially for 2x6
1558 * devices, however it is safe to load default
1559 * configuration of 3x6 device instead of masking off
1560 * corresponding bits because HW ignores bits of a disabled
1561 * subslice and drops down to appropriate config. Please
1562 * see render_state_setup() in i915_gem_render_state.c for
1563 * possible configurations, to avoid duplication they are
1564 * not shown here again.
1566 *batch++ = GEN9_MEDIA_POOL_STATE;
1567 *batch++ = GEN9_MEDIA_POOL_ENABLE;
1568 *batch++ = 0x00777000;
1574 *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1576 /* Pad to end of cacheline */
1577 while ((unsigned long)batch % CACHELINE_BYTES)
1584 gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
1589 * WaPipeControlBefore3DStateSamplePattern: cnl
1591 * Ensure the engine is idle prior to programming a
1592 * 3DSTATE_SAMPLE_PATTERN during a context restore.
1594 batch = gen8_emit_pipe_control(batch,
1595 PIPE_CONTROL_CS_STALL,
1598 * WaPipeControlBefore3DStateSamplePattern says we need 4 dwords for
1599 * the PIPE_CONTROL followed by 12 dwords of 0x0, so 16 dwords in
1600 * total. However, a PIPE_CONTROL is 6 dwords long, not 4, which is
1601 * confusing. Since gen8_emit_pipe_control() already advances the
1602 * batch by 6 dwords, we advance the other 10 here, completing a
1603 * cacheline. It's not clear if the workaround requires this padding
1604 * before other commands, or if it's just the regular padding we would
1605 * already have for the workaround bb, so leave it here for now.
1607 for (i = 0; i < 10; i++)
1610 /* Pad to end of cacheline */
1611 while ((unsigned long)batch % CACHELINE_BYTES)
1617 #define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE)
1619 static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
1621 struct drm_i915_gem_object *obj;
1622 struct i915_vma *vma;
1625 obj = i915_gem_object_create(engine->i915, CTX_WA_BB_OBJ_SIZE);
1627 return PTR_ERR(obj);
1629 vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
1635 err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH);
1639 engine->wa_ctx.vma = vma;
1643 i915_gem_object_put(obj);
1647 static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
1649 i915_vma_unpin_and_release(&engine->wa_ctx.vma);
1652 typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
1654 static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1656 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1657 struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
1659 wa_bb_func_t wa_bb_fn[2];
1661 void *batch, *batch_ptr;
1665 if (GEM_WARN_ON(engine->id != RCS))
1668 switch (INTEL_GEN(engine->i915)) {
1670 wa_bb_fn[0] = gen10_init_indirectctx_bb;
1674 wa_bb_fn[0] = gen9_init_indirectctx_bb;
1678 wa_bb_fn[0] = gen8_init_indirectctx_bb;
1682 MISSING_CASE(INTEL_GEN(engine->i915));
1686 ret = lrc_setup_wa_ctx(engine);
1688 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1692 page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
1693 batch = batch_ptr = kmap_atomic(page);
1696 * Emit the two workaround batch buffers, recording the offset from the
1697 * start of the workaround batch buffer object for each and their
1700 for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
1701 wa_bb[i]->offset = batch_ptr - batch;
1702 if (GEM_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
1703 CACHELINE_BYTES))) {
1708 batch_ptr = wa_bb_fn[i](engine, batch_ptr);
1709 wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
1712 BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
1714 kunmap_atomic(batch);
1716 lrc_destroy_wa_ctx(engine);
1721 static void enable_execlists(struct intel_engine_cs *engine)
1723 struct drm_i915_private *dev_priv = engine->i915;
1725 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
1728 * Make sure we're not enabling the new 12-deep CSB
1729 * FIFO as that requires a slightly updated handling
1730 * in the ctx switch irq. Since we're currently only
1731 * using 2 elements of the enhanced execlists, the
1732 * deeper FIFO is not needed and it's not worth adding
1733 * more statements to the irq handler to support it.
1735 if (INTEL_GEN(dev_priv) >= 11)
1736 I915_WRITE(RING_MODE_GEN7(engine),
1737 _MASKED_BIT_DISABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
1739 I915_WRITE(RING_MODE_GEN7(engine),
1740 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1742 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1743 engine->status_page.ggtt_offset);
1744 POSTING_READ(RING_HWS_PGA(engine->mmio_base));
1746 /* Following the reset, we need to reload the CSB read/write pointers */
1747 engine->execlists.csb_head = -1;
1750 static int gen8_init_common_ring(struct intel_engine_cs *engine)
1752 struct intel_engine_execlists * const execlists = &engine->execlists;
1755 ret = intel_mocs_init_engine(engine);
1759 intel_engine_reset_breadcrumbs(engine);
1760 intel_engine_init_hangcheck(engine);
1762 enable_execlists(engine);
1764 /* After a GPU reset, we may have requests to replay */
1765 if (execlists->first)
1766 tasklet_schedule(&execlists->tasklet);
1771 static int gen8_init_render_ring(struct intel_engine_cs *engine)
1773 struct drm_i915_private *dev_priv = engine->i915;
1776 ret = gen8_init_common_ring(engine);
1780 intel_whitelist_workarounds_apply(engine);
1782 /* We need to disable the AsyncFlip performance optimisations in order
1783 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1784 * programmed to '1' on all products.
1786 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1788 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1790 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1795 static int gen9_init_render_ring(struct intel_engine_cs *engine)
1799 ret = gen8_init_common_ring(engine);
1803 intel_whitelist_workarounds_apply(engine);
1808 static void reset_common_ring(struct intel_engine_cs *engine,
1809 struct i915_request *request)
1811 struct intel_engine_execlists * const execlists = &engine->execlists;
1812 unsigned long flags;
1815 GEM_TRACE("%s request global=%x, current=%d\n",
1816 engine->name, request ? request->global_seqno : 0,
1817 intel_engine_get_seqno(engine));
1819 /* See execlists_cancel_requests() for the irq/spinlock split. */
1820 local_irq_save(flags);
1823 * Catch up with any missed context-switch interrupts.
1825 * Ideally we would just read the remaining CSB entries now that we
1826 * know the gpu is idle. However, the CSB registers are sometimes^W
1827 * often trashed across a GPU reset! Instead we have to rely on
1828 * guessing the missed context-switch events by looking at what
1829 * requests were completed.
1831 execlists_cancel_port_requests(execlists);
1834 /* Push back any incomplete requests for replay after the reset. */
1835 spin_lock(&engine->timeline.lock);
1836 __unwind_incomplete_requests(engine);
1837 spin_unlock(&engine->timeline.lock);
1839 local_irq_restore(flags);
1842 * If the request was innocent, we leave the request in the ELSP
1843 * and will try to replay it on restarting. The context image may
1844 * have been corrupted by the reset, in which case we may have
1845 * to service a new GPU hang, but more likely we can continue on
1848 * If the request was guilty, we presume the context is corrupt
1849 * and have to at least restore the RING register in the context
1850 * image back to the expected values to skip over the guilty request.
1852 if (!request || request->fence.error != -EIO)
1856 * We want a simple context + ring to execute the breadcrumb update.
1857 * We cannot rely on the context being intact across the GPU hang,
1858 * so clear it and rebuild just what we need for the breadcrumb.
1859 * All pending requests for this context will be zapped, and any
1860 * future request will be after userspace has had the opportunity
1861 * to recreate its own state.
1863 regs = to_intel_context(request->ctx, engine)->lrc_reg_state;
1864 if (engine->default_state) {
1867 defaults = i915_gem_object_pin_map(engine->default_state,
1869 if (!IS_ERR(defaults)) {
1870 memcpy(regs, /* skip restoring the vanilla PPHWSP */
1871 defaults + LRC_STATE_PN * PAGE_SIZE,
1872 engine->context_size - PAGE_SIZE);
1873 i915_gem_object_unpin_map(engine->default_state);
1876 execlists_init_reg_state(regs, request->ctx, engine, request->ring);
1878 /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
1879 regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
1880 regs[CTX_RING_HEAD + 1] = request->postfix;
1882 request->ring->head = request->postfix;
1883 intel_ring_update_space(request->ring);
1885 /* Reset WaIdleLiteRestore:bdw,skl as well */
1886 unwind_wa_tail(request);
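/*
 * Emit an MI_LOAD_REGISTER_IMM that reloads the PDP registers of a 32b
 * (3-level) ppgtt from the page directory addresses tracked by the driver.
 */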
1889 static int intel_logical_ring_emit_pdps(struct i915_request *rq)
1891 struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
1892 struct intel_engine_cs *engine = rq->engine;
1893 const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
1897 cs = intel_ring_begin(rq, num_lri_cmds * 2 + 2);
1901 *cs++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
1902 for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
1903 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1905 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
1906 *cs++ = upper_32_bits(pd_daddr);
1907 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
1908 *cs++ = lower_32_bits(pd_daddr);
1912 intel_ring_advance(rq, cs);
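/*
 * Emit the MI_BATCH_BUFFER_START for a request: reload the PDPs first if the
 * 32b ppgtt for this context is dirty, and bracket the batch start with
 * MI_ARB_ON_OFF so that arbitration is enabled around it (see below).
 */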
1917 static int gen8_emit_bb_start(struct i915_request *rq,
1918 u64 offset, u32 len,
1919 const unsigned int flags)
1924 /* Don't rely on the hw updating the PDPs, especially in lite-restore.
1925 * Ideally, we should set Force PD Restore in the ctx descriptor,
1926 * but we can't. Force Restore would be a second option, but
1927 * it is unsafe in case of lite-restore (because the ctx is
1928 * not idle). The PML4 is allocated during ppgtt init, so this is
1929 * not needed in 48-bit. */
1930 if (rq->ctx->ppgtt &&
1931 (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) &&
1932 !i915_vm_is_48bit(&rq->ctx->ppgtt->base) &&
1933 !intel_vgpu_active(rq->i915)) {
1934 ret = intel_logical_ring_emit_pdps(rq);
1938 rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
1941 cs = intel_ring_begin(rq, 6);
1946 * WaDisableCtxRestoreArbitration:bdw,chv
1948 * We don't need to perform MI_ARB_ENABLE as often as we do (in
1949 * particular on all the gens that do not need the w/a at all!); if we
1950 * took care to make sure that on every switch into this context
1951 * (both ordinary and for preemption) arbitration was enabled,
1952 * we would be fine. However, there doesn't seem to be a downside to
1953 * being paranoid and making sure it is set before each batch and
1954 * every context-switch.
1956 * Note that if we fail to enable arbitration before the request
1957 * is complete, then we do not see the context-switch interrupt and
1958 * the engine hangs (with RING_HEAD == RING_TAIL).
1960 * That satisfies both the GPGPU w/a and our heavy-handed paranoia.
1962 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1964 /* FIXME(BDW): Address space and security selectors. */
1965 *cs++ = MI_BATCH_BUFFER_START_GEN8 |
1966 (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)) |
1967 (flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
1968 *cs++ = lower_32_bits(offset);
1969 *cs++ = upper_32_bits(offset);
1971 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1973 intel_ring_advance(rq, cs);
1978 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
1980 struct drm_i915_private *dev_priv = engine->i915;
1981 I915_WRITE_IMR(engine,
1982 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1983 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1986 static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
1988 struct drm_i915_private *dev_priv = engine->i915;
1989 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
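/*
 * Flush for the non-render engines: a single MI_FLUSH_DW, optionally with TLB
 * (and, on the video engine, BSD) invalidation, posting a dword write to a
 * scratch slot as the mandatory command barrier.
 */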
1992 static int gen8_emit_flush(struct i915_request *request, u32 mode)
1996 cs = intel_ring_begin(request, 4);
2000 cmd = MI_FLUSH_DW + 1;
2002 /* We always require a command barrier so that subsequent
2003 * commands, such as breadcrumb interrupts, are strictly ordered
2004 * wrt the contents of the write cache being flushed to memory
2005 * (and thus being coherent from the CPU).
2007 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2009 if (mode & EMIT_INVALIDATE) {
2010 cmd |= MI_INVALIDATE_TLB;
2011 if (request->engine->id == VCS)
2012 cmd |= MI_INVALIDATE_BSD;
2016 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
2017 *cs++ = 0; /* upper addr */
2018 *cs++ = 0; /* value */
2019 intel_ring_advance(request, cs);
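/*
 * Flush for the render engine: build a PIPE_CONTROL with the requested
 * flush/invalidate bits, wrapped in the extra pipe controls required by the
 * gen9 VF-cache and kbl GAM-hang workarounds.
 */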
2024 static int gen8_emit_flush_render(struct i915_request *request,
2027 struct intel_engine_cs *engine = request->engine;
2029 i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
2030 bool vf_flush_wa = false, dc_flush_wa = false;
2034 flags |= PIPE_CONTROL_CS_STALL;
2036 if (mode & EMIT_FLUSH) {
2037 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
2038 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
2039 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
2040 flags |= PIPE_CONTROL_FLUSH_ENABLE;
2043 if (mode & EMIT_INVALIDATE) {
2044 flags |= PIPE_CONTROL_TLB_INVALIDATE;
2045 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
2046 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
2047 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
2048 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
2049 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
2050 flags |= PIPE_CONTROL_QW_WRITE;
2051 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
2054 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL pipe control.
2057 if (IS_GEN9(request->i915))
2060 /* WaForGAMHang:kbl */
2061 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
2073 cs = intel_ring_begin(request, len);
2078 cs = gen8_emit_pipe_control(cs, 0, 0);
2081 cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
2084 cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
2087 cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
2089 intel_ring_advance(request, cs);
2095 * Reserve space for 2 NOOPs at the end of each request to be
2096 * used as a workaround for not being allowed to do lite
2097 * restore with HEAD==TAIL (WaIdleLiteRestore).
2099 static void gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
2101 /* Ensure there's always at least one preemption point per-request. */
2102 *cs++ = MI_ARB_CHECK;
2104 request->wa_tail = intel_ring_offset(request, cs);
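/*
 * Emit the request breadcrumb: a GGTT write of the global seqno into the
 * hardware status page followed by a user interrupt, with arbitration
 * re-enabled and the WaIdleLiteRestore tail padding appended.
 */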
2107 static void gen8_emit_breadcrumb(struct i915_request *request, u32 *cs)
2109 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
2110 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
2112 cs = gen8_emit_ggtt_write(cs, request->global_seqno,
2113 intel_hws_seqno_address(request->engine));
2114 *cs++ = MI_USER_INTERRUPT;
2115 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
2116 request->tail = intel_ring_offset(request, cs);
2117 assert_ring_tail_valid(request->ring, request->tail);
2119 gen8_emit_wa_tail(request, cs);
2121 static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
2123 static void gen8_emit_breadcrumb_rcs(struct i915_request *request, u32 *cs)
2125 /* We're using qword write, seqno should be aligned to 8 bytes. */
2126 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
2128 cs = gen8_emit_ggtt_write_rcs(cs, request->global_seqno,
2129 intel_hws_seqno_address(request->engine));
2130 *cs++ = MI_USER_INTERRUPT;
2131 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
2132 request->tail = intel_ring_offset(request, cs);
2133 assert_ring_tail_valid(request->ring, request->tail);
2135 gen8_emit_wa_tail(request, cs);
2137 static const int gen8_emit_breadcrumb_rcs_sz = 8 + WA_TAIL_DWORDS;
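/*
 * One-time render context initialisation: emit the context workarounds and
 * the MOCS programming into the new context, then the golden render state.
 */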
2139 static int gen8_init_rcs_context(struct i915_request *rq)
2143 ret = intel_ctx_workarounds_emit(rq);
2147 ret = intel_rcs_context_init_mocs(rq);
2149 * Failing to program the MOCS is non-fatal. The system will not
2150 * run at peak performance, so generate an error and carry on.
2153 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
2155 return i915_gem_render_state_emit(rq);
2159 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
2160 * @engine: Engine Command Streamer.
2162 void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
2164 struct drm_i915_private *dev_priv;
2167 * The tasklet cannot be active at this point due to intel_mark_active/idle,
2168 * so this is just for documentation.
2170 if (WARN_ON(test_bit(TASKLET_STATE_SCHED,
2171 &engine->execlists.tasklet.state)))
2172 tasklet_kill(&engine->execlists.tasklet);
2174 dev_priv = engine->i915;
2176 if (engine->buffer) {
2177 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
2180 if (engine->cleanup)
2181 engine->cleanup(engine);
2183 intel_engine_cleanup_common(engine);
2185 lrc_destroy_wa_ctx(engine);
2187 engine->i915 = NULL;
2188 dev_priv->engine[engine->id] = NULL;
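/*
 * Install the execlists request-submission vfuncs on the engine and advertise
 * the scheduler capabilities (priorities and, when a preempt context exists,
 * preemption) that come with them.
 */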
2192 static void execlists_set_default_submission(struct intel_engine_cs *engine)
2194 engine->submit_request = execlists_submit_request;
2195 engine->cancel_requests = execlists_cancel_requests;
2196 engine->schedule = execlists_schedule;
2197 engine->execlists.tasklet.func = execlists_submission_tasklet;
2199 engine->park = NULL;
2200 engine->unpark = NULL;
2202 engine->flags |= I915_ENGINE_SUPPORTS_STATS;
2203 if (engine->i915->preempt_context)
2204 engine->flags |= I915_ENGINE_HAS_PREEMPTION;
2206 engine->i915->caps.scheduler =
2207 I915_SCHEDULER_CAP_ENABLED |
2208 I915_SCHEDULER_CAP_PRIORITY;
2209 if (intel_engine_has_preemption(engine))
2210 engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
2214 logical_ring_default_vfuncs(struct intel_engine_cs *engine)
2216 /* Default vfuncs which can be overridden by each engine. */
2217 engine->init_hw = gen8_init_common_ring;
2218 engine->reset_hw = reset_common_ring;
2220 engine->context_pin = execlists_context_pin;
2221 engine->context_unpin = execlists_context_unpin;
2223 engine->request_alloc = execlists_request_alloc;
2225 engine->emit_flush = gen8_emit_flush;
2226 engine->emit_breadcrumb = gen8_emit_breadcrumb;
2227 engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
2229 engine->set_default_submission = execlists_set_default_submission;
2231 if (INTEL_GEN(engine->i915) < 11) {
2232 engine->irq_enable = gen8_logical_ring_enable_irq;
2233 engine->irq_disable = gen8_logical_ring_disable_irq;
2236 * TODO: On Gen11 interrupt masks need to be clear
2237 * to allow C6 entry. Keep interrupts enabled
2238 * and take the hit of generating extra interrupts
2239 * until a more refined solution exists.
2242 engine->emit_bb_start = gen8_emit_bb_start;
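/*
 * Compute the per-engine shift of the user-interrupt and context-switch bits
 * in the interrupt masks; on gen11+ no per-engine shift is applied.
 */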
2246 logical_ring_default_irqs(struct intel_engine_cs *engine)
2248 unsigned int shift = 0;
2250 if (INTEL_GEN(engine->i915) < 11) {
2251 const u8 irq_shifts[] = {
2252 [RCS] = GEN8_RCS_IRQ_SHIFT,
2253 [BCS] = GEN8_BCS_IRQ_SHIFT,
2254 [VCS] = GEN8_VCS1_IRQ_SHIFT,
2255 [VCS2] = GEN8_VCS2_IRQ_SHIFT,
2256 [VECS] = GEN8_VECS_IRQ_SHIFT,
2259 shift = irq_shifts[engine->id];
2262 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
2263 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
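/*
 * Early, allocation-free setup of an execlists engine: work out which
 * forcewake domains cover the execlist submission and context-status
 * registers, initialise the submission tasklet and install the default
 * vfuncs and interrupt masks.
 */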
2267 logical_ring_setup(struct intel_engine_cs *engine)
2269 struct drm_i915_private *dev_priv = engine->i915;
2270 enum forcewake_domains fw_domains;
2272 intel_engine_setup_common(engine);
2274 /* Intentionally left blank. */
2275 engine->buffer = NULL;
2277 fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
2281 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2282 RING_CONTEXT_STATUS_PTR(engine),
2283 FW_REG_READ | FW_REG_WRITE);
2285 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2286 RING_CONTEXT_STATUS_BUF_BASE(engine),
2289 engine->execlists.fw_domains = fw_domains;
2291 tasklet_init(&engine->execlists.tasklet,
2292 execlists_submission_tasklet, (unsigned long)engine);
2294 logical_ring_default_vfuncs(engine);
2295 logical_ring_default_irqs(engine);
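/*
 * Complete the engine initialisation: set up the common engine state, pick
 * the submission register (the ELSQ where available, the ELSP otherwise) and
 * cache the upper half of the preempt context descriptor used to recognise
 * the completion of a preemption.
 */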
2298 static int logical_ring_init(struct intel_engine_cs *engine)
2302 ret = intel_engine_init_common(engine);
2306 if (HAS_LOGICAL_RING_ELSQ(engine->i915)) {
2307 engine->execlists.submit_reg = engine->i915->regs +
2308 i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
2309 engine->execlists.ctrl_reg = engine->i915->regs +
2310 i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
2312 engine->execlists.submit_reg = engine->i915->regs +
2313 i915_mmio_reg_offset(RING_ELSP(engine));
2316 engine->execlists.preempt_complete_status = ~0u;
2317 if (engine->i915->preempt_context) {
2318 struct intel_context *ce =
2319 to_intel_context(engine->i915->preempt_context, engine);
2321 engine->execlists.preempt_complete_status =
2322 upper_32_bits(ce->lrc_desc);
2328 intel_logical_ring_cleanup(engine);
2332 int logical_render_ring_init(struct intel_engine_cs *engine)
2334 struct drm_i915_private *dev_priv = engine->i915;
2337 logical_ring_setup(engine);
2339 if (HAS_L3_DPF(dev_priv))
2340 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2342 /* Override some for render ring. */
2343 if (INTEL_GEN(dev_priv) >= 9)
2344 engine->init_hw = gen9_init_render_ring;
2346 engine->init_hw = gen8_init_render_ring;
2347 engine->init_context = gen8_init_rcs_context;
2348 engine->emit_flush = gen8_emit_flush_render;
2349 engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs;
2350 engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_rcs_sz;
2352 ret = intel_engine_create_scratch(engine, PAGE_SIZE);
2356 ret = intel_init_workaround_bb(engine);
2359 * We continue even if we fail to initialize the WA batch,
2360 * because we only expect rare glitches, nothing critical
2361 * enough to prevent us from using the GPU.
2363 DRM_ERROR("WA batch buffer initialization failed: %d\n",
2367 return logical_ring_init(engine);
2370 int logical_xcs_ring_init(struct intel_engine_cs *engine)
2372 logical_ring_setup(engine);
2374 return logical_ring_init(engine);
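/*
 * Compute the render power clock state (RPCS) value programmed into the
 * context image: the slice/subslice counts and per-subslice EU range to
 * request when power gating would otherwise leave them partially enabled.
 */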
2378 make_rpcs(struct drm_i915_private *dev_priv)
2383 * No explicit RPCS request is needed to ensure full
2384 * slice/subslice/EU enablement prior to Gen9.
2386 if (INTEL_GEN(dev_priv) < 9)
2390 * Starting in Gen9, render power gating can leave
2391 * slice/subslice/EU in a partially enabled state. We
2392 * must make an explicit request through RPCS for full enablement.
2395 if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
2396 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
2397 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
2398 GEN8_RPCS_S_CNT_SHIFT;
2399 rpcs |= GEN8_RPCS_ENABLE;
2402 if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
2403 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
2404 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask[0]) <<
2405 GEN8_RPCS_SS_CNT_SHIFT;
2406 rpcs |= GEN8_RPCS_ENABLE;
2409 if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
2410 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
2411 GEN8_RPCS_EU_MIN_SHIFT;
2412 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
2413 GEN8_RPCS_EU_MAX_SHIFT;
2414 rpcs |= GEN8_RPCS_ENABLE;
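/*
 * Each gen documents a different default offset for the indirect context
 * batch within the context image; return the value for this engine's gen.
 */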
2420 static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
2422 u32 indirect_ctx_offset;
2424 switch (INTEL_GEN(engine->i915)) {
2426 MISSING_CASE(INTEL_GEN(engine->i915));
2429 indirect_ctx_offset =
2430 GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2433 indirect_ctx_offset =
2434 GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2437 indirect_ctx_offset =
2438 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2441 indirect_ctx_offset =
2442 GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2446 return indirect_ctx_offset;
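/*
 * Fill in the MI_LOAD_REGISTER_IMM headers and (reg, value) pairs that make
 * up the register state of a logical ring context: ring buffer registers,
 * batch buffer state, workaround batch pointers, PDPs and, for the render
 * engine, the power clock state.
 */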
2449 static void execlists_init_reg_state(u32 *regs,
2450 struct i915_gem_context *ctx,
2451 struct intel_engine_cs *engine,
2452 struct intel_ring *ring)
2454 struct drm_i915_private *dev_priv = engine->i915;
2455 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
2456 u32 base = engine->mmio_base;
2457 bool rcs = engine->id == RCS;
2459 /* A context is actually a big batch buffer with several
2460 * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
2461 * values we are setting here are only for the first context restore:
2462 * on a subsequent save, the GPU will recreate this batchbuffer with new
2463 * values (including all the missing MI_LOAD_REGISTER_IMM commands that
2464 * we are not initializing here).
2466 regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
2467 MI_LRI_FORCE_POSTED;
2469 CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
2470 _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2471 CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT) |
2472 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2473 (HAS_RESOURCE_STREAMER(dev_priv) ?
2474 CTX_CTRL_RS_CTX_ENABLE : 0)));
2475 CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
2476 CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0);
2477 CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0);
2478 CTX_REG(regs, CTX_RING_BUFFER_CONTROL, RING_CTL(base),
2479 RING_CTL_SIZE(ring->size) | RING_VALID);
2480 CTX_REG(regs, CTX_BB_HEAD_U, RING_BBADDR_UDW(base), 0);
2481 CTX_REG(regs, CTX_BB_HEAD_L, RING_BBADDR(base), 0);
2482 CTX_REG(regs, CTX_BB_STATE, RING_BBSTATE(base), RING_BB_PPGTT);
2483 CTX_REG(regs, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(base), 0);
2484 CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0);
2485 CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0);
2487 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
2489 CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0);
2490 CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET,
2491 RING_INDIRECT_CTX_OFFSET(base), 0);
2492 if (wa_ctx->indirect_ctx.size) {
2493 u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
2495 regs[CTX_RCS_INDIRECT_CTX + 1] =
2496 (ggtt_offset + wa_ctx->indirect_ctx.offset) |
2497 (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
2499 regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] =
2500 intel_lr_indirect_ctx_offset(engine) << 6;
2503 CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
2504 if (wa_ctx->per_ctx.size) {
2505 u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
2507 regs[CTX_BB_PER_CTX_PTR + 1] =
2508 (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
2512 regs[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
2514 CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0);
2515 /* PDP values will be assigned later if needed */
2516 CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3), 0);
2517 CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3), 0);
2518 CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2), 0);
2519 CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2), 0);
2520 CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1), 0);
2521 CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1), 0);
2522 CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
2523 CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
2525 if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) {
2526 /* 64b PPGTT (48bit canonical)
2527 * PDP0_DESCRIPTOR contains the base address of the PML4, and the
2528 * other PDP descriptors are ignored.
2530 ASSIGN_CTX_PML4(ppgtt, regs);
2534 regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2535 CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2536 make_rpcs(dev_priv));
2538 i915_oa_init_reg_state(engine, ctx, regs);
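/*
 * Map the freshly allocated context object and populate it: copy in the
 * engine's default (golden) context image if one exists, then write the
 * register state for this particular context and ring on top.
 */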
2543 populate_lr_context(struct i915_gem_context *ctx,
2544 struct drm_i915_gem_object *ctx_obj,
2545 struct intel_engine_cs *engine,
2546 struct intel_ring *ring)
2552 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
2554 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
2558 vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
2559 if (IS_ERR(vaddr)) {
2560 ret = PTR_ERR(vaddr);
2561 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
2564 ctx_obj->mm.dirty = true;
2566 if (engine->default_state) {
2568 * We only want to copy over the template context state;
2569 * skipping over the headers reserved for GuC communication,
2570 * leaving those as zero.
2572 const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE;
2575 defaults = i915_gem_object_pin_map(engine->default_state,
2577 if (IS_ERR(defaults)) {
2578 ret = PTR_ERR(defaults);
2582 memcpy(vaddr + start, defaults + start, engine->context_size);
2583 i915_gem_object_unpin_map(engine->default_state);
2586 /* The second page of the context object contains some fields which must
2587 * be set up prior to the first execution. */
2588 regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
2589 execlists_init_reg_state(regs, ctx, engine, ring);
2590 if (!engine->default_state)
2591 regs[CTX_CONTEXT_CONTROL + 1] |=
2592 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
2593 if (ctx == ctx->i915->preempt_context && INTEL_GEN(engine->i915) < 11)
2594 regs[CTX_CONTEXT_CONTROL + 1] |=
2595 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2596 CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
2599 i915_gem_object_unpin_map(ctx_obj);
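/*
 * Deferred, per-engine creation of a logical ring context: allocate the
 * backing object and its GGTT vma, create the timeline and ring buffer, and
 * populate the context image.
 */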
2603 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
2604 struct intel_engine_cs *engine)
2606 struct drm_i915_gem_object *ctx_obj;
2607 struct intel_context *ce = to_intel_context(ctx, engine);
2608 struct i915_vma *vma;
2609 uint32_t context_size;
2610 struct intel_ring *ring;
2611 struct i915_timeline *timeline;
2617 context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
2620 * Before the actual start of the context image, we insert a few pages
2621 * for our own use and for sharing with the GuC.
2623 context_size += LRC_HEADER_PAGES * PAGE_SIZE;
2625 ctx_obj = i915_gem_object_create(ctx->i915, context_size);
2626 if (IS_ERR(ctx_obj)) {
2627 ret = PTR_ERR(ctx_obj);
2628 goto error_deref_obj;
2631 vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
2634 goto error_deref_obj;
2637 timeline = i915_timeline_create(ctx->i915, ctx->name);
2638 if (IS_ERR(timeline)) {
2639 ret = PTR_ERR(timeline);
2640 goto error_deref_obj;
2643 ring = intel_engine_create_ring(engine, timeline, ctx->ring_size);
2644 i915_timeline_put(timeline);
2646 ret = PTR_ERR(ring);
2647 goto error_deref_obj;
2650 ret = populate_lr_context(ctx, ctx_obj, engine, ring);
2652 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
2653 goto error_ring_free;
2662 intel_ring_free(ring);
2664 i915_gem_object_put(ctx_obj);
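/*
 * Scrub the ring registers of every context image on resume so that the GPU
 * cannot resume execution from stale state; see the comment below for why.
 */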
2668 void intel_lr_context_resume(struct drm_i915_private *dev_priv)
2670 struct intel_engine_cs *engine;
2671 struct i915_gem_context *ctx;
2672 enum intel_engine_id id;
2674 /* Because we emit WA_TAIL_DWORDS there may be a disparity
2675 * between our bookkeeping in ce->ring->head and ce->ring->tail and
2676 * that stored in context. As we only write new commands from
2677 * ce->ring->tail onwards, everything before that is junk. If the GPU
2678 * starts reading from its RING_HEAD from the context, it may try to
2679 * execute that junk and die.
2681 * So to avoid that we reset the context images upon resume. For
2682 * simplicity, we just zero everything out.
2684 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
2685 for_each_engine(engine, dev_priv, id) {
2686 struct intel_context *ce =
2687 to_intel_context(ctx, engine);
2693 reg = i915_gem_object_pin_map(ce->state->obj,
2695 if (WARN_ON(IS_ERR(reg)))
2698 reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
2699 reg[CTX_RING_HEAD+1] = 0;
2700 reg[CTX_RING_TAIL+1] = 0;
2702 ce->state->obj->mm.dirty = true;
2703 i915_gem_object_unpin_map(ce->state->obj);
2705 intel_ring_reset(ce->ring, 0);
2710 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2711 #include "selftests/intel_lrc.c"