1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Ben Widawsky <ben@bwidawsk.net>
25  *    Michel Thierry <michel.thierry@intel.com>
26  *    Thomas Daniel <thomas.daniel@intel.com>
27  *    Oscar Mateo <oscar.mateo@intel.com>
28  *
29  */
30
31 /**
32  * DOC: Logical Rings, Logical Ring Contexts and Execlists
33  *
34  * Motivation:
35  * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
36  * These expanded contexts enable a number of new abilities, especially
37  * "Execlists" (also implemented in this file).
38  *
39  * One of the main differences from the legacy HW contexts is that logical
40  * ring contexts incorporate many more things into the context's state, like
41  * PDPs or ringbuffer control registers:
42  *
43  * The reason why PDPs are included in the context is straightforward: as
44  * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
45  * contained there means you don't need to do a ppgtt->switch_mm yourself;
46  * instead, the GPU will do it for you on the context switch.
47  *
48  * But, what about the ringbuffer control registers (head, tail, etc.)?
49  * Shouldn't we just need one set of those per engine command streamer? This is
50  * where the name "Logical Rings" starts to make sense: by virtualizing the
51  * rings, the engine cs shifts to a new "ring buffer" with every context
52  * switch. When you want to submit a workload to the GPU you: A) choose your
53  * context, B) find its appropriate virtualized ring, C) write commands to it
54  * and then, finally, D) tell the GPU to switch to that context.
55  *
56  * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
57  * to a context is via a context execution list, ergo "Execlists".
58  *
59  * LRC implementation:
60  * Regarding the creation of contexts, we have:
61  *
62  * - One global default context.
63  * - One local default context for each opened fd.
64  * - One local extra context for each context create ioctl call.
65  *
66  * Now that ringbuffers belong per-context (and not per-engine, like before)
67  * and that contexts are uniquely tied to a given engine (and not reusable,
68  * like before), we need:
69  *
70  * - One ringbuffer per-engine inside each context.
71  * - One backing object per-engine inside each context.
72  *
73  * The global default context starts its life with these new objects fully
74  * allocated and populated. The local default context for each opened fd is
75  * more complex, because we don't know at creation time which engine is going
76  * to use them. To handle this, we have implemented a deferred creation of LR
77  * contexts:
78  *
79  * The local context starts its life as a hollow or blank holder that only
80  * gets populated for a given engine once we receive an execbuffer. If later
81  * on we receive another execbuffer ioctl for the same context but a different
82  * engine, we allocate/populate a new ringbuffer and context backing object and
83  * so on.
84  *
85  * Finally, regarding local contexts created using the ioctl call: as they are
86  * only allowed with the render ring, we can allocate & populate them right
87  * away (no need to defer anything, at least for now).
88  *
89  * Execlists implementation:
90  * Execlists are the new method by which, on gen8+ hardware, workloads are
91  * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
92  * This method works as follows:
93  *
94  * When a request is committed, its commands (the BB start and any leading or
95  * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
96  * for the appropriate context. The tail pointer in the hardware context is not
97  * updated at this time, but instead, kept by the driver in the ringbuffer
98  * structure. A structure representing this request is added to a request queue
99  * for the appropriate engine: this structure contains a copy of the context's
100  * tail after the request was written to the ring buffer and a pointer to the
101  * context itself.
102  *
103  * If the engine's request queue was empty before the request was added, the
104  * queue is processed immediately. Otherwise the queue will be processed during
105  * a context switch interrupt. In any case, elements on the queue will get sent
106  * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
107  * globally unique 20-bit submission ID.
108  *
109  * When execution of a request completes, the GPU updates the context status
110  * buffer with a context complete event and generates a context switch interrupt.
111  * During the interrupt handling, the driver examines the events in the buffer:
112  * for each context complete event, if the announced ID matches that on the head
113  * of the request queue, then that request is retired and removed from the queue.
114  *
115  * After processing, if any requests were retired and the queue is not empty
116  * then a new execution list can be submitted. The two requests at the front of
117  * the queue are next to be submitted but since a context may not occur twice in
118  * an execution list, if subsequent requests have the same ID as the first then
119  * the two requests must be combined. This is done simply by discarding requests
120  * at the head of the queue until either only one request is left (in which case
121  * we use a NULL second context) or the first two requests have unique IDs.
122  *
123  * By always executing the first two requests in the queue the driver ensures
124  * that the GPU is kept as busy as possible. In the case where a single context
125  * completes but a second context is still executing, the request for this second
126  * context will be at the head of the queue when we remove the first one. This
127  * request will then be resubmitted along with a new request for a different context,
128  * which will cause the hardware to continue executing the second request and queue
129  * the new request (the GPU detects the condition of a context getting preempted
130  * with the same context and optimizes the context switch flow by not doing
131  * preemption, but just sampling the new tail pointer).
132  *
133  */
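/*
 * Illustrative sketch (not driver code): the ELSP pairing rule described
 * above reduces to roughly the following pseudo-C, where the hypothetical
 * pick_run() returns the last request of a run of same-context requests at
 * the head of the queue:
 *
 *	elsp[0] = pick_run(queue);
 *	elsp[1] = queue_is_empty(queue) ? NULL : pick_run(queue);
 *
 * i.e. the two ports always carry requests from two different contexts (or
 * a single context plus NULL). The real logic, including priority handling
 * and the GVT single-port restriction, lives in execlists_dequeue() below.
 */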
134 #include <linux/interrupt.h>
135
136 #include <drm/i915_drm.h>
137 #include "i915_drv.h"
138 #include "i915_gem_render_state.h"
139 #include "i915_reset.h"
140 #include "i915_vgpu.h"
141 #include "intel_lrc_reg.h"
142 #include "intel_mocs.h"
143 #include "intel_workarounds.h"
144
145 #define RING_EXECLIST_QFULL             (1 << 0x2)
146 #define RING_EXECLIST1_VALID            (1 << 0x3)
147 #define RING_EXECLIST0_VALID            (1 << 0x4)
148 #define RING_EXECLIST_ACTIVE_STATUS     (3 << 0xE)
149 #define RING_EXECLIST1_ACTIVE           (1 << 0x11)
150 #define RING_EXECLIST0_ACTIVE           (1 << 0x12)
151
152 #define GEN8_CTX_STATUS_IDLE_ACTIVE     (1 << 0)
153 #define GEN8_CTX_STATUS_PREEMPTED       (1 << 1)
154 #define GEN8_CTX_STATUS_ELEMENT_SWITCH  (1 << 2)
155 #define GEN8_CTX_STATUS_ACTIVE_IDLE     (1 << 3)
156 #define GEN8_CTX_STATUS_COMPLETE        (1 << 4)
157 #define GEN8_CTX_STATUS_LITE_RESTORE    (1 << 15)
158
159 #define GEN8_CTX_STATUS_COMPLETED_MASK \
160          (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)
161
162 /* Typical size of the average request (2 pipecontrols and a MI_BB) */
163 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
164 #define WA_TAIL_DWORDS 2
165 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
166
167 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
168                                             struct intel_engine_cs *engine,
169                                             struct intel_context *ce);
170 static void execlists_init_reg_state(u32 *reg_state,
171                                      struct i915_gem_context *ctx,
172                                      struct intel_engine_cs *engine,
173                                      struct intel_ring *ring);
174
175 static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
176 {
177         return (i915_ggtt_offset(engine->status_page.vma) +
178                 I915_GEM_HWS_INDEX_ADDR);
179 }
180
181 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
182 {
183         return rb_entry(rb, struct i915_priolist, node);
184 }
185
186 static inline int rq_prio(const struct i915_request *rq)
187 {
188         return rq->sched.attr.priority;
189 }
190
191 static int queue_prio(const struct intel_engine_execlists *execlists)
192 {
193         struct i915_priolist *p;
194         struct rb_node *rb;
195
196         rb = rb_first_cached(&execlists->queue);
197         if (!rb)
198                 return INT_MIN;
199
200         /*
201          * As the priolist[] are inverted, with the highest priority in [0],
202          * we have to flip the index value back into a priority.
203          */
204         p = to_priolist(rb);
205         return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
206 }
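/*
 * Worked example (illustrative, assuming two internal priority bits, i.e.
 * I915_USER_PRIORITY_SHIFT == 2): a priolist with p->priority == 0 whose
 * p->used mask is 0x1 (only sub-level [0], the highest, is occupied)
 * decodes to ((0 + 1) << 2) - ffs(0x1) = 4 - 1 = 3, i.e. user priority 0
 * with all internal priority bits set.
 */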
207
208 static inline bool need_preempt(const struct intel_engine_cs *engine,
209                                 const struct i915_request *rq)
210 {
211         const int last_prio = rq_prio(rq);
212
213         if (!intel_engine_has_preemption(engine))
214                 return false;
215
216         if (i915_request_completed(rq))
217                 return false;
218
219         /*
220          * Check if the current priority hint merits a preemption attempt.
221          *
222          * We record the highest value priority we saw during rescheduling
223          * prior to this dequeue, therefore we know that if it is strictly
224          * less than the current tail of ELSP[0], we do not need to force
225          * a preempt-to-idle cycle.
226          *
227          * However, the priority hint is a mere hint that we may need to
228          * preempt. If that hint is stale or we may be trying to preempt
229          * ourselves, ignore the request.
230          */
231         if (!__execlists_need_preempt(engine->execlists.queue_priority_hint,
232                                       last_prio))
233                 return false;
234
235         /*
236          * Check against the first request in ELSP[1], it will, thanks to the
237          * power of PI, be the highest priority of that context.
238          */
239         if (!list_is_last(&rq->link, &engine->timeline.requests) &&
240             rq_prio(list_next_entry(rq, link)) > last_prio)
241                 return true;
242
243         /*
244          * If the inflight context did not trigger the preemption, then maybe
245          * it was the set of queued requests? Pick the highest priority in
246          * the queue (the first active priolist) and see if it deserves to be
247          * running instead of ELSP[0].
248          *
249          * The highest priority request in the queue cannot be either
250          * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
251          * context, its priority would not exceed ELSP[0] aka last_prio.
252          */
253         return queue_prio(&engine->execlists) > last_prio;
254 }
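/*
 * Illustrative example of the above: if the request in ELSP[0] runs at an
 * effective priority of 2, a stale queue_priority_hint of 1 fails the
 * __execlists_need_preempt() check and we leave the GPU alone, whereas a
 * hint of 4 backed by a queued request of effective priority 4 passes both
 * the hint check and the queue_prio() comparison, and we go on to inject
 * the preempt context.
 */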
255
256 __maybe_unused static inline bool
257 assert_priority_queue(const struct intel_engine_execlists *execlists,
258                       const struct i915_request *prev,
259                       const struct i915_request *next)
260 {
261         if (!prev)
262                 return true;
263
264         /*
265          * Without preemption, the prev may refer to the still active element
266          * which we refuse to let go.
267          *
268          * Even with preemption, there are times when we think it is better not
269          * to preempt and leave an ostensibly lower priority request in flight.
270          */
271         if (port_request(execlists->port) == prev)
272                 return true;
273
274         return rq_prio(prev) >= rq_prio(next);
275 }
276
277 /*
278  * The context descriptor encodes various attributes of a context,
279  * including its GTT address and some flags. Because it's fairly
280  * expensive to calculate, we'll just do it once and cache the result,
281  * which remains valid until the context is unpinned.
282  *
283  * This is what a descriptor looks like, from LSB to MSB::
284  *
285  *      bits  0-11:    flags, GEN8_CTX_* (cached in ctx->desc_template)
286  *      bits 12-31:    LRCA, GTT address of (the HWSP of) this context
287  *      bits 32-52:    ctx ID, a globally unique tag (highest bit used by GuC)
288  *      bits 53-54:    mbz, reserved for use by hardware
289  *      bits 55-63:    group ID, currently unused and set to 0
290  *
291  * Starting from Gen11, the upper dword of the descriptor has a new format:
292  *
293  *      bits 32-36:    reserved
294  *      bits 37-47:    SW context ID
295  *      bits 48-53:    engine instance
296  *      bit 54:        mbz, reserved for use by hardware
297  *      bits 55-60:    SW counter
298  *      bits 61-63:    engine class
299  *
300  * engine info, SW context ID and SW counter need to form a unique number
301  * (Context ID) per lrc.
302  */
303 static void
304 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
305                                    struct intel_engine_cs *engine,
306                                    struct intel_context *ce)
307 {
308         u64 desc;
309
310         BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
311         BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH)));
312
313         desc = ctx->desc_template;                              /* bits  0-11 */
314         GEM_BUG_ON(desc & GENMASK_ULL(63, 12));
315
316         desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
317                                                                 /* bits 12-31 */
318         GEM_BUG_ON(desc & GENMASK_ULL(63, 32));
319
320         /*
321          * The following 32 bits are copied into the OA reports (dword 2).
322          * Consider updating oa_get_render_ctx_id in i915_perf.c when changing
323          * anything below.
324          */
325         if (INTEL_GEN(ctx->i915) >= 11) {
326                 GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
327                 desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
328                                                                 /* bits 37-47 */
329
330                 desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT;
331                                                                 /* bits 48-53 */
332
333                 /* TODO: decide what to do with SW counter (bits 55-60) */
334
335                 desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT;
336                                                                 /* bits 61-63 */
337         } else {
338                 GEM_BUG_ON(ctx->hw_id >= BIT(GEN8_CTX_ID_WIDTH));
339                 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;   /* bits 32-52 */
340         }
341
342         ce->lrc_desc = desc;
343 }
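/*
 * Illustrative Gen8 example (values invented): for a context with
 * ctx->hw_id == 5, the function above yields
 *
 *	desc = ctx->desc_template			(flags, bits 0-11)
 *	     | lrca					(bits 12-31)
 *	     | ((u64)5 << GEN8_CTX_ID_SHIFT);		(bits 32-52)
 *
 * where lrca is the GGTT offset of the state object plus the LRC header
 * pages.
 */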
344
345 static void unwind_wa_tail(struct i915_request *rq)
346 {
347         rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
348         assert_ring_tail_valid(rq->ring, rq->tail);
349 }
350
351 static struct i915_request *
352 __unwind_incomplete_requests(struct intel_engine_cs *engine)
353 {
354         struct i915_request *rq, *rn, *active = NULL;
355         struct list_head *uninitialized_var(pl);
356         int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT;
357
358         lockdep_assert_held(&engine->timeline.lock);
359
360         list_for_each_entry_safe_reverse(rq, rn,
361                                          &engine->timeline.requests,
362                                          link) {
363                 if (i915_request_completed(rq))
364                         break;
365
366                 __i915_request_unsubmit(rq);
367                 unwind_wa_tail(rq);
368
369                 GEM_BUG_ON(rq->hw_context->active);
370
371                 GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
372                 if (rq_prio(rq) != prio) {
373                         prio = rq_prio(rq);
374                         pl = i915_sched_lookup_priolist(engine, prio);
375                 }
376                 GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
377
378                 list_add(&rq->sched.link, pl);
379
380                 active = rq;
381         }
382
383         /*
384          * The active request is now effectively the start of a new client
385          * stream, so give it the equivalent small priority bump to prevent
386          * it being gazumped a second time by another peer.
387          */
388         if (!(prio & I915_PRIORITY_NEWCLIENT)) {
389                 prio |= I915_PRIORITY_NEWCLIENT;
390                 active->sched.attr.priority = prio;
391                 list_move_tail(&active->sched.link,
392                                i915_sched_lookup_priolist(engine, prio));
393         }
394
395         return active;
396 }
397
398 void
399 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
400 {
401         struct intel_engine_cs *engine =
402                 container_of(execlists, typeof(*engine), execlists);
403
404         __unwind_incomplete_requests(engine);
405 }
406
407 static inline void
408 execlists_context_status_change(struct i915_request *rq, unsigned long status)
409 {
410         /*
411          * This is only used when GVT-g is enabled, for now. When GVT-g is
412          * disabled, the compiler should eliminate this function as dead code.
413          */
414         if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
415                 return;
416
417         atomic_notifier_call_chain(&rq->engine->context_status_notifier,
418                                    status, rq);
419 }
420
421 inline void
422 execlists_user_begin(struct intel_engine_execlists *execlists,
423                      const struct execlist_port *port)
424 {
425         execlists_set_active_once(execlists, EXECLISTS_ACTIVE_USER);
426 }
427
428 inline void
429 execlists_user_end(struct intel_engine_execlists *execlists)
430 {
431         execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
432 }
433
434 static inline void
435 execlists_context_schedule_in(struct i915_request *rq)
436 {
437         GEM_BUG_ON(rq->hw_context->active);
438
439         execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
440         intel_engine_context_in(rq->engine);
441         rq->hw_context->active = rq->engine;
442 }
443
444 static inline void
445 execlists_context_schedule_out(struct i915_request *rq, unsigned long status)
446 {
447         rq->hw_context->active = NULL;
448         intel_engine_context_out(rq->engine);
449         execlists_context_status_change(rq, status);
450         trace_i915_request_out(rq);
451 }
452
453 static u64 execlists_update_context(struct i915_request *rq)
454 {
455         struct intel_context *ce = rq->hw_context;
456
457         ce->lrc_reg_state[CTX_RING_TAIL + 1] =
458                 intel_ring_set_tail(rq->ring, rq->tail);
459
460         /*
461          * Make sure the context image is complete before we submit it to HW.
462          *
463          * Ostensibly, writes (including the WCB) should be flushed prior to
464          * an uncached write such as our mmio register access, but the
465          * empirical evidence (esp. on Braswell) suggests that the WC write
466          * into memory may not be visible to the HW prior to the completion
467          * of the UC register write, and that we may begin execution from the
468          * context before its image is complete, leading to invalid PD chasing.
469          *
470          * Furthermore, Braswell, at least, wants a full mb to be sure that
471          * the writes are coherent in memory (visible to the GPU) prior to
472          * execution, and not just visible to other CPUs (as is the result of
473          * wmb).
474          */
475         mb();
476         return ce->lrc_desc;
477 }
478
479 static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port)
480 {
481         if (execlists->ctrl_reg) {
482                 writel(lower_32_bits(desc), execlists->submit_reg + port * 2);
483                 writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1);
484         } else {
485                 writel(upper_32_bits(desc), execlists->submit_reg);
486                 writel(lower_32_bits(desc), execlists->submit_reg);
487         }
488 }
489
490 static void execlists_submit_ports(struct intel_engine_cs *engine)
491 {
492         struct intel_engine_execlists *execlists = &engine->execlists;
493         struct execlist_port *port = execlists->port;
494         unsigned int n;
495
496         /*
497          * We can skip acquiring intel_runtime_pm_get() here as it was taken
498          * on our behalf by the request (see i915_gem_mark_busy()) and it will
499          * not be relinquished until the device is idle (see
500          * i915_gem_idle_work_handler()). As a precaution, we make sure
501          * that all ELSP are drained, i.e. we have processed the CSB,
502          * before allowing ourselves to idle and calling intel_runtime_pm_put().
503          */
504         GEM_BUG_ON(!engine->i915->gt.awake);
505
506         /*
507          * ELSQ note: the submit queue is not cleared after being submitted
508          * to the HW so we need to make sure we always clean it up. This is
509          * currently ensured by the fact that we always write the same number
510          * of elsq entries; keep this in mind before changing the loop below.
511          */
512         for (n = execlists_num_ports(execlists); n--; ) {
513                 struct i915_request *rq;
514                 unsigned int count;
515                 u64 desc;
516
517                 rq = port_unpack(&port[n], &count);
518                 if (rq) {
519                         GEM_BUG_ON(count > !n);
520                         if (!count++)
521                                 execlists_context_schedule_in(rq);
522                         port_set(&port[n], port_pack(rq, count));
523                         desc = execlists_update_context(rq);
524                         GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
525
526                         GEM_TRACE("%s in[%d]:  ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
527                                   engine->name, n,
528                                   port[n].context_id, count,
529                                   rq->global_seqno,
530                                   rq->fence.context, rq->fence.seqno,
531                                   hwsp_seqno(rq),
532                                   intel_engine_get_seqno(engine),
533                                   rq_prio(rq));
534                 } else {
535                         GEM_BUG_ON(!n);
536                         desc = 0;
537                 }
538
539                 write_desc(execlists, desc, n);
540         }
541
542         /* we need to manually load the submit queue */
543         if (execlists->ctrl_reg)
544                 writel(EL_CTRL_LOAD, execlists->ctrl_reg);
545
546         execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
547 }
548
549 static bool ctx_single_port_submission(const struct intel_context *ce)
550 {
551         return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
552                 i915_gem_context_force_single_submission(ce->gem_context));
553 }
554
555 static bool can_merge_ctx(const struct intel_context *prev,
556                           const struct intel_context *next)
557 {
558         if (prev != next)
559                 return false;
560
561         if (ctx_single_port_submission(prev))
562                 return false;
563
564         return true;
565 }
566
567 static void port_assign(struct execlist_port *port, struct i915_request *rq)
568 {
569         GEM_BUG_ON(rq == port_request(port));
570
571         if (port_isset(port))
572                 i915_request_put(port_request(port));
573
574         port_set(port, port_pack(i915_request_get(rq), port_count(port)));
575 }
576
577 static void inject_preempt_context(struct intel_engine_cs *engine)
578 {
579         struct intel_engine_execlists *execlists = &engine->execlists;
580         struct intel_context *ce =
581                 to_intel_context(engine->i915->preempt_context, engine);
582         unsigned int n;
583
584         GEM_BUG_ON(execlists->preempt_complete_status !=
585                    upper_32_bits(ce->lrc_desc));
586
587         /*
588          * Switch to our empty preempt context so
589          * the state of the GPU is known (idle).
590          */
591         GEM_TRACE("%s\n", engine->name);
592         for (n = execlists_num_ports(execlists); --n; )
593                 write_desc(execlists, 0, n);
594
595         write_desc(execlists, ce->lrc_desc, n);
596
597         /* we need to manually load the submit queue */
598         if (execlists->ctrl_reg)
599                 writel(EL_CTRL_LOAD, execlists->ctrl_reg);
600
601         execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
602         execlists_set_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
603
604         (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
605 }
606
607 static void complete_preempt_context(struct intel_engine_execlists *execlists)
608 {
609         GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));
610
611         if (inject_preempt_hang(execlists))
612                 return;
613
614         execlists_cancel_port_requests(execlists);
615         __unwind_incomplete_requests(container_of(execlists,
616                                                   struct intel_engine_cs,
617                                                   execlists));
618 }
619
620 static void execlists_dequeue(struct intel_engine_cs *engine)
621 {
622         struct intel_engine_execlists * const execlists = &engine->execlists;
623         struct execlist_port *port = execlists->port;
624         const struct execlist_port * const last_port =
625                 &execlists->port[execlists->port_mask];
626         struct i915_request *last = port_request(port);
627         struct rb_node *rb;
628         bool submit = false;
629
630         /*
631          * Hardware submission is through 2 ports. Conceptually each port
632          * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
633          * static for a context, and unique to each, so we only execute
634          * requests belonging to a single context from each ring. RING_HEAD
635          * is maintained by the CS in the context image; it marks the place
636          * where it got up to last time, and through RING_TAIL we tell the CS
637          * where we want to execute up to this time.
638          *
639          * In this list the requests are in order of execution. Consecutive
640          * requests from the same context are adjacent in the ringbuffer. We
641          * can combine these requests into a single RING_TAIL update:
642          *
643          *              RING_HEAD...req1...req2
644          *                                    ^- RING_TAIL
645          * since to execute req2 the CS must first execute req1.
646          *
647          * Our goal then is to point each port to the end of a consecutive
648          * sequence of requests as being the most optimal (fewest wake ups
649          * and context switches) submission.
650          */
651
652         if (last) {
653                 /*
654                  * Don't resubmit or switch until all outstanding
655                  * preemptions (lite-restore) are seen. Then we
656                  * know the next preemption status we see corresponds
657                  * to this ELSP update.
658                  */
659                 GEM_BUG_ON(!execlists_is_active(execlists,
660                                                 EXECLISTS_ACTIVE_USER));
661                 GEM_BUG_ON(!port_count(&port[0]));
662
663                 /*
664                  * If we write to ELSP a second time before the HW has had
665                  * a chance to respond to the previous write, we can confuse
666                  * the HW and hit "undefined behaviour". After writing to ELSP,
667                  * we must then wait until we see a context-switch event from
668                  * the HW to indicate that it has had a chance to respond.
669                  */
670                 if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
671                         return;
672
673                 if (need_preempt(engine, last)) {
674                         inject_preempt_context(engine);
675                         return;
676                 }
677
678                 /*
679                  * In theory, we could coalesce more requests onto
680                  * the second port (the first port is active, with
681                  * no preemptions pending). However, that means we
682                  * then have to deal with the possible lite-restore
683                  * of the second port (as we submit the ELSP, there
684                  * may be a context-switch) but also we may complete
685                  * the resubmission before the context-switch. Ergo,
686                  * coalescing onto the second port will cause a
687                  * preemption event, but we cannot predict whether
688                  * that will affect port[0] or port[1].
689                  *
690                  * If the second port is already active, we can wait
691                  * until the next context-switch before contemplating
692                  * new requests. The GPU will be busy and we should be
693                  * able to resubmit the new ELSP before it idles,
694                  * avoiding pipeline bubbles (momentary pauses where
695                  * the driver is unable to keep up the supply of new
696                  * work). However, we have to double check that the
697                  * priorities of the ports haven't been switched.
698                  */
699                 if (port_count(&port[1]))
700                         return;
701
702                 /*
703                  * WaIdleLiteRestore:bdw,skl
704                  * Apply the wa NOOPs to prevent
705                  * ring:HEAD == rq:TAIL as we resubmit the
706                  * request. See gen8_emit_fini_breadcrumb() for
707                  * where we prepare the padding after the
708                  * end of the request.
709                  */
710                 last->tail = last->wa_tail;
711         }
712
713         while ((rb = rb_first_cached(&execlists->queue))) {
714                 struct i915_priolist *p = to_priolist(rb);
715                 struct i915_request *rq, *rn;
716                 int i;
717
718                 priolist_for_each_request_consume(rq, rn, p, i) {
719                         GEM_BUG_ON(!assert_priority_queue(execlists, last, rq));
720
721                         /*
722                          * Can we combine this request with the current port?
723                          * It has to be the same context/ringbuffer and not
724                          * have any exceptions (e.g. GVT saying never to
725                          * combine contexts).
726                          *
727                          * If we can combine the requests, we can execute both
728                          * by updating the RING_TAIL to point to the end of the
729                          * second request, and so we never need to tell the
730                          * hardware about the first.
731                          */
732                         if (last &&
733                             !can_merge_ctx(rq->hw_context, last->hw_context)) {
734                                 /*
735                                  * If we are on the second port and cannot
736                                  * combine this request with the last, then we
737                                  * are done.
738                                  */
739                                 if (port == last_port)
740                                         goto done;
741
742                                 /*
743                                  * If GVT overrides us we only ever submit
744                                  * port[0], leaving port[1] empty. Note that we
745                                  * also have to be careful that we don't queue
746                                  * the same context (even though a different
747                                  * request) to the second port.
748                                  */
749                                 if (ctx_single_port_submission(last->hw_context) ||
750                                     ctx_single_port_submission(rq->hw_context))
751                                         goto done;
752
753                                 GEM_BUG_ON(last->hw_context == rq->hw_context);
754
755                                 if (submit)
756                                         port_assign(port, last);
757                                 port++;
758
759                                 GEM_BUG_ON(port_isset(port));
760                         }
761
762                         list_del_init(&rq->sched.link);
763
764                         __i915_request_submit(rq);
765                         trace_i915_request_in(rq, port_index(port, execlists));
766
767                         last = rq;
768                         submit = true;
769                 }
770
771                 rb_erase_cached(&p->node, &execlists->queue);
772                 if (p->priority != I915_PRIORITY_NORMAL)
773                         kmem_cache_free(engine->i915->priorities, p);
774         }
775
776 done:
777         /*
778          * Here be a bit of magic! Or sleight-of-hand, whichever you prefer.
779          *
780          * We choose the priority hint such that if we add a request of greater
781          * priority than this, we kick the submission tasklet to decide on
782          * the right order of submitting the requests to hardware. We must
783          * also be prepared to reorder requests as they are in-flight on the
784          * HW. We derive the priority hint then as the first "hole" in
785          * the HW submission ports and if there are no available slots,
786          * the priority of the lowest executing request, i.e. last.
787          *
788          * When we do receive a higher priority request ready to run from the
789          * user, see queue_request(), the priority hint is bumped to that
790          * request triggering preemption on the next dequeue (or subsequent
791          * interrupt for secondary ports).
792          */
793         execlists->queue_priority_hint =
794                 port != execlists->port ? rq_prio(last) : INT_MIN;
795
796         if (submit) {
797                 port_assign(port, last);
798                 execlists_submit_ports(engine);
799         }
800
801         /* We must always keep the beast fed if we have work piled up */
802         GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
803                    !port_isset(execlists->port));
804
805         /* Re-evaluate the executing context setup after each preemptive kick */
806         if (last)
807                 execlists_user_begin(execlists, execlists->port);
808
809         /* If the engine is now idle, so should be the flag; and vice versa. */
810         GEM_BUG_ON(execlists_is_active(&engine->execlists,
811                                        EXECLISTS_ACTIVE_USER) ==
812                    !port_isset(engine->execlists.port));
813 }
814
815 void
816 execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
817 {
818         struct execlist_port *port = execlists->port;
819         unsigned int num_ports = execlists_num_ports(execlists);
820
821         while (num_ports-- && port_isset(port)) {
822                 struct i915_request *rq = port_request(port);
823
824                 GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d:%d)\n",
825                           rq->engine->name,
826                           (unsigned int)(port - execlists->port),
827                           rq->global_seqno,
828                           rq->fence.context, rq->fence.seqno,
829                           hwsp_seqno(rq),
830                           intel_engine_get_seqno(rq->engine));
831
832                 GEM_BUG_ON(!execlists->active);
833                 execlists_context_schedule_out(rq,
834                                                i915_request_completed(rq) ?
835                                                INTEL_CONTEXT_SCHEDULE_OUT :
836                                                INTEL_CONTEXT_SCHEDULE_PREEMPTED);
837
838                 i915_request_put(rq);
839
840                 memset(port, 0, sizeof(*port));
841                 port++;
842         }
843
844         execlists_clear_all_active(execlists);
845 }
846
847 static inline void
848 invalidate_csb_entries(const u32 *first, const u32 *last)
849 {
850         clflush((void *)first);
851         clflush((void *)last);
852 }
853
854 static void reset_csb_pointers(struct intel_engine_execlists *execlists)
855 {
856         const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
857
858         /*
859          * After a reset, the HW starts writing into CSB entry [0]. We
860          * therefore have to set our HEAD pointer back one entry so that
861          * the *first* entry we check is entry 0. To complicate this further,
862          * as we don't wait for the first interrupt after reset, we have to
863          * fake the HW write to point back to the last entry so that our
864          * inline comparison of our cached head position against the last HW
865          * write works even before the first interrupt.
866          */
867         execlists->csb_head = reset_value;
868         WRITE_ONCE(*execlists->csb_write, reset_value);
869
870         invalidate_csb_entries(&execlists->csb_status[0],
871                                &execlists->csb_status[GEN8_CSB_ENTRIES - 1]);
872 }
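/*
 * Concretely: with GEN8_CSB_ENTRIES == 6, both csb_head and the shadow
 * write pointer are parked at 5 after a reset, so the first event the HW
 * writes (into entry 0, advancing its write pointer to 0) is seen as
 * head != tail by the next process_csb().
 */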
873
874 static void nop_submission_tasklet(unsigned long data)
875 {
876         /* The driver is wedged; don't process any more events. */
877 }
878
879 static void execlists_cancel_requests(struct intel_engine_cs *engine)
880 {
881         struct intel_engine_execlists * const execlists = &engine->execlists;
882         struct i915_request *rq, *rn;
883         struct rb_node *rb;
884         unsigned long flags;
885
886         GEM_TRACE("%s current %d\n",
887                   engine->name, intel_engine_get_seqno(engine));
888
889         /*
890          * Before we call engine->cancel_requests(), we should have exclusive
891          * access to the submission state. This is arranged for us by the
892          * caller disabling the interrupt generation, the tasklet and other
893          * threads that may then access the same state, giving us a free hand
894          * to reset state. However, we still need to let lockdep be aware that
895          * we know this state may be accessed in hardirq context, so we
896          * disable the irq around this manipulation and we want to keep
897          * the spinlock focused on its duties and not accidentally conflate
898          * coverage to the submission's irq state. (Similarly, although we
899          * shouldn't need to disable irq around the manipulation of the
900          * submission's irq state, we also wish to remind ourselves that
901          * it is irq state.)
902          */
903         spin_lock_irqsave(&engine->timeline.lock, flags);
904
905         /* Cancel the requests on the HW and clear the ELSP tracker. */
906         execlists_cancel_port_requests(execlists);
907         execlists_user_end(execlists);
908
909         /* Mark all executing requests as skipped. */
910         list_for_each_entry(rq, &engine->timeline.requests, link) {
911                 GEM_BUG_ON(!rq->global_seqno);
912
913                 if (!i915_request_signaled(rq))
914                         dma_fence_set_error(&rq->fence, -EIO);
915
916                 i915_request_mark_complete(rq);
917         }
918
919         /* Flush the queued requests to the timeline list (for retiring). */
920         while ((rb = rb_first_cached(&execlists->queue))) {
921                 struct i915_priolist *p = to_priolist(rb);
922                 int i;
923
924                 priolist_for_each_request_consume(rq, rn, p, i) {
925                         list_del_init(&rq->sched.link);
926                         __i915_request_submit(rq);
927                         dma_fence_set_error(&rq->fence, -EIO);
928                         i915_request_mark_complete(rq);
929                 }
930
931                 rb_erase_cached(&p->node, &execlists->queue);
932                 if (p->priority != I915_PRIORITY_NORMAL)
933                         kmem_cache_free(engine->i915->priorities, p);
934         }
935
936         intel_write_status_page(engine,
937                                 I915_GEM_HWS_INDEX,
938                                 intel_engine_last_submit(engine));
939
940         /* Remaining _unready_ requests will be nop'ed when submitted */
941
942         execlists->queue_priority_hint = INT_MIN;
943         execlists->queue = RB_ROOT_CACHED;
944         GEM_BUG_ON(port_isset(execlists->port));
945
946         GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
947         execlists->tasklet.func = nop_submission_tasklet;
948
949         spin_unlock_irqrestore(&engine->timeline.lock, flags);
950 }
951
952 static inline bool
953 reset_in_progress(const struct intel_engine_execlists *execlists)
954 {
955         return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
956 }
957
958 static void process_csb(struct intel_engine_cs *engine)
959 {
960         struct intel_engine_execlists * const execlists = &engine->execlists;
961         struct execlist_port *port = execlists->port;
962         const u32 * const buf = execlists->csb_status;
963         u8 head, tail;
964
965         lockdep_assert_held(&engine->timeline.lock);
966
967         /*
968          * Note that csb_write, csb_status may be either in HWSP or mmio.
969          * When reading from the csb_write mmio register, we have to be
970          * careful to only use the GEN8_CSB_WRITE_PTR portion, which is
971          * the low 4 bits. As it happens we know the next 4 bits are always
972          * zero and so we can simply mask off the low u8 of the register
973          * and treat it identically to reading from the HWSP (without having
974          * to use explicit shifting and masking, and probably bifurcating
975          * the code to handle the legacy mmio read).
976          */
977         head = execlists->csb_head;
978         tail = READ_ONCE(*execlists->csb_write);
979         GEM_TRACE("%s cs-irq head=%d, tail=%d\n", engine->name, head, tail);
980         if (unlikely(head == tail))
981                 return;
982
983         /*
984          * Hopefully paired with a wmb() in HW!
985          *
986          * We must complete the read of the write pointer before any reads
987          * from the CSB, so that we do not see stale values. Without an rmb
988          * (lfence) the HW may speculatively perform the CSB[] reads *before*
989          * we perform the READ_ONCE(*csb_write).
990          */
991         rmb();
992
993         do {
994                 struct i915_request *rq;
995                 unsigned int status;
996                 unsigned int count;
997
998                 if (++head == GEN8_CSB_ENTRIES)
999                         head = 0;
1000
1001                 /*
1002                  * We are flying near dragons again.
1003                  *
1004                  * We hold a reference to the request in execlist_port[]
1005                  * but no more than that. We are operating in softirq
1006                  * context and so cannot hold any mutex or sleep. That
1007                  * means we cannot stop the requests we are processing
1008                  * in port[] from being retired simultaneously (the
1009                  * breadcrumb will be complete before we see the
1010                  * context-switch). As we only hold the reference to the
1011                  * request, any pointer chasing underneath the request
1012                  * is subject to a potential use-after-free. Thus we
1013                  * store all of the bookkeeping within port[] as
1014                  * required, and avoid using unguarded pointers beneath
1015                  * request itself. The same applies to the atomic
1016                  * status notifier.
1017                  */
1018
1019                 GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
1020                           engine->name, head,
1021                           buf[2 * head + 0], buf[2 * head + 1],
1022                           execlists->active);
1023
1024                 status = buf[2 * head];
1025                 if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
1026                               GEN8_CTX_STATUS_PREEMPTED))
1027                         execlists_set_active(execlists,
1028                                              EXECLISTS_ACTIVE_HWACK);
1029                 if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
1030                         execlists_clear_active(execlists,
1031                                                EXECLISTS_ACTIVE_HWACK);
1032
1033                 if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
1034                         continue;
1035
1036                 /* We should never get a COMPLETED | IDLE_ACTIVE! */
1037                 GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
1038
1039                 if (status & GEN8_CTX_STATUS_COMPLETE &&
1040                     buf[2*head + 1] == execlists->preempt_complete_status) {
1041                         GEM_TRACE("%s preempt-idle\n", engine->name);
1042                         complete_preempt_context(execlists);
1043                         continue;
1044                 }
1045
1046                 if (status & GEN8_CTX_STATUS_PREEMPTED &&
1047                     execlists_is_active(execlists,
1048                                         EXECLISTS_ACTIVE_PREEMPT))
1049                         continue;
1050
1051                 GEM_BUG_ON(!execlists_is_active(execlists,
1052                                                 EXECLISTS_ACTIVE_USER));
1053
1054                 rq = port_unpack(port, &count);
1055                 GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
1056                           engine->name,
1057                           port->context_id, count,
1058                           rq ? rq->global_seqno : 0,
1059                           rq ? rq->fence.context : 0,
1060                           rq ? rq->fence.seqno : 0,
1061                           rq ? hwsp_seqno(rq) : 0,
1062                           intel_engine_get_seqno(engine),
1063                           rq ? rq_prio(rq) : 0);
1064
1065                 /* Check the context/desc id for this event matches */
1066                 GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
1067
1068                 GEM_BUG_ON(count == 0);
1069                 if (--count == 0) {
1070                         /*
1071                          * On the final event corresponding to the
1072                          * submission of this context, we expect either
1073                          * an element-switch event or a completion
1074                          * event (and on completion, the active-idle
1075                          * marker). No more preemptions, lite-restore
1076                          * or otherwise.
1077                          */
1078                         GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
1079                         GEM_BUG_ON(port_isset(&port[1]) &&
1080                                    !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
1081                         GEM_BUG_ON(!port_isset(&port[1]) &&
1082                                    !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
1083
1084                         /*
1085                          * We rely on the hardware being strongly
1086                          * ordered, that the breadcrumb write is
1087                          * coherent (visible from the CPU) before the
1088                          * user interrupt and CSB is processed.
1089                          */
1090                         GEM_BUG_ON(!i915_request_completed(rq));
1091
1092                         execlists_context_schedule_out(rq,
1093                                                        INTEL_CONTEXT_SCHEDULE_OUT);
1094                         i915_request_put(rq);
1095
1096                         GEM_TRACE("%s completed ctx=%d\n",
1097                                   engine->name, port->context_id);
1098
1099                         port = execlists_port_complete(execlists, port);
1100                         if (port_isset(port))
1101                                 execlists_user_begin(execlists, port);
1102                         else
1103                                 execlists_user_end(execlists);
1104                 } else {
1105                         port_set(port, port_pack(rq, count));
1106                 }
1107         } while (head != tail);
1108
1109         execlists->csb_head = head;
1110
1111         /*
1112          * Gen11 has proven to fail wrt the global observation point between
1113          * entry and tail update, failing on the ordering, and thus we can
1114          * see an old entry in the context status buffer.
1115          *
1116          * Forcibly evict stale entries before the next gpu csb update, to
1117          * increase the odds that we get fresh entries even with misbehaving
1118          * hardware. The cost of doing so comes out mostly in the wash, as
1119          * the hardware, working or not, will need to do the invalidation
1120          * before.
1121          */
1122         invalidate_csb_entries(&buf[0], &buf[GEN8_CSB_ENTRIES - 1]);
1123 }
1124
1125 static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
1126 {
1127         lockdep_assert_held(&engine->timeline.lock);
1128
1129         process_csb(engine);
1130         if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
1131                 execlists_dequeue(engine);
1132 }
1133
1134 /*
1135  * Check the unread Context Status Buffers and manage the submission of new
1136  * contexts to the ELSP accordingly.
1137  */
1138 static void execlists_submission_tasklet(unsigned long data)
1139 {
1140         struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
1141         unsigned long flags;
1142
1143         GEM_TRACE("%s awake?=%d, active=%x\n",
1144                   engine->name,
1145                   !!engine->i915->gt.awake,
1146                   engine->execlists.active);
1147
1148         spin_lock_irqsave(&engine->timeline.lock, flags);
1149         __execlists_submission_tasklet(engine);
1150         spin_unlock_irqrestore(&engine->timeline.lock, flags);
1151 }
1152
1153 static void queue_request(struct intel_engine_cs *engine,
1154                           struct i915_sched_node *node,
1155                           int prio)
1156 {
1157         list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
1158 }
1159
1160 static void __submit_queue_imm(struct intel_engine_cs *engine)
1161 {
1162         struct intel_engine_execlists * const execlists = &engine->execlists;
1163
1164         if (reset_in_progress(execlists))
1165                 return; /* defer until we restart the engine following reset */
1166
1167         if (execlists->tasklet.func == execlists_submission_tasklet)
1168                 __execlists_submission_tasklet(engine);
1169         else
1170                 tasklet_hi_schedule(&execlists->tasklet);
1171 }
1172
1173 static void submit_queue(struct intel_engine_cs *engine, int prio)
1174 {
1175         if (prio > engine->execlists.queue_priority_hint) {
1176                 engine->execlists.queue_priority_hint = prio;
1177                 __submit_queue_imm(engine);
1178         }
1179 }
1180
1181 static void execlists_submit_request(struct i915_request *request)
1182 {
1183         struct intel_engine_cs *engine = request->engine;
1184         unsigned long flags;
1185
1186         /* Will be called from irq-context when using foreign fences. */
1187         spin_lock_irqsave(&engine->timeline.lock, flags);
1188
1189         queue_request(engine, &request->sched, rq_prio(request));
1190
1191         GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
1192         GEM_BUG_ON(list_empty(&request->sched.link));
1193
1194         submit_queue(engine, rq_prio(request));
1195
1196         spin_unlock_irqrestore(&engine->timeline.lock, flags);
1197 }
1198
1199 static void execlists_context_destroy(struct intel_context *ce)
1200 {
1201         GEM_BUG_ON(ce->pin_count);
1202
1203         if (!ce->state)
1204                 return;
1205
1206         intel_ring_free(ce->ring);
1207
1208         GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
1209         i915_gem_object_put(ce->state->obj);
1210 }
1211
1212 static void execlists_context_unpin(struct intel_context *ce)
1213 {
1214         struct intel_engine_cs *engine;
1215
1216         /*
1217          * The tasklet may still be using a pointer to our state, via an
1218          * old request. However, since we know we only unpin the context
1219          * on retirement of the following request, we know that the last
1220          * request referencing us will have had a completion CS interrupt.
1221          * If we see that it is still active, it means that the tasklet hasn't
1222          * had the chance to run yet; let it run before we tear down the
1223          * reference it may use.
1224          */
1225         engine = READ_ONCE(ce->active);
1226         if (unlikely(engine)) {
1227                 unsigned long flags;
1228
1229                 spin_lock_irqsave(&engine->timeline.lock, flags);
1230                 process_csb(engine);
1231                 spin_unlock_irqrestore(&engine->timeline.lock, flags);
1232
1233                 GEM_BUG_ON(READ_ONCE(ce->active));
1234         }
1235
1236         i915_gem_context_unpin_hw_id(ce->gem_context);
1237
1238         intel_ring_unpin(ce->ring);
1239
1240         ce->state->obj->pin_global--;
1241         i915_gem_object_unpin_map(ce->state->obj);
1242         i915_vma_unpin(ce->state);
1243
1244         i915_gem_context_put(ce->gem_context);
1245 }
1246
1247 static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
1248 {
1249         unsigned int flags;
1250         int err;
1251
1252         /*
1253          * Clear this page out of any CPU caches for coherent swap-in/out.
1254          * We only want to do this on the first bind so that we do not stall
1255          * on an active context (which by nature is already on the GPU).
1256          */
1257         if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1258                 err = i915_gem_object_set_to_wc_domain(vma->obj, true);
1259                 if (err)
1260                         return err;
1261         }
1262
1263         flags = PIN_GLOBAL | PIN_HIGH;
1264         flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
1265
1266         return i915_vma_pin(vma, 0, 0, flags);
1267 }
1268
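/* Refresh the ring-buffer registers (and RPCS, on the render engine) in the context image. */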
1269 static void
1270 __execlists_update_reg_state(struct intel_engine_cs *engine,
1271                              struct intel_context *ce)
1272 {
1273         u32 *regs = ce->lrc_reg_state;
1274         struct intel_ring *ring = ce->ring;
1275
1276         regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
1277         regs[CTX_RING_HEAD + 1] = ring->head;
1278         regs[CTX_RING_TAIL + 1] = ring->tail;
1279
1280         /* RPCS */
1281         if (engine->class == RENDER_CLASS)
1282                 regs[CTX_R_PWR_CLK_STATE + 1] = gen8_make_rpcs(engine->i915,
1283                                                                &ce->sseu);
1284 }
1285
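/*
 * Allocate the context state on first use, then pin and map it into the
 * GGTT and initialise its register state for this engine.
 */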
1286 static struct intel_context *
1287 __execlists_context_pin(struct intel_engine_cs *engine,
1288                         struct i915_gem_context *ctx,
1289                         struct intel_context *ce)
1290 {
1291         void *vaddr;
1292         int ret;
1293
1294         ret = execlists_context_deferred_alloc(ctx, engine, ce);
1295         if (ret)
1296                 goto err;
1297         GEM_BUG_ON(!ce->state);
1298
1299         ret = __context_pin(ctx, ce->state);
1300         if (ret)
1301                 goto err;
1302
1303         vaddr = i915_gem_object_pin_map(ce->state->obj,
1304                                         i915_coherent_map_type(ctx->i915) |
1305                                         I915_MAP_OVERRIDE);
1306         if (IS_ERR(vaddr)) {
1307                 ret = PTR_ERR(vaddr);
1308                 goto unpin_vma;
1309         }
1310
1311         ret = intel_ring_pin(ce->ring);
1312         if (ret)
1313                 goto unpin_map;
1314
1315         ret = i915_gem_context_pin_hw_id(ctx);
1316         if (ret)
1317                 goto unpin_ring;
1318
1319         intel_lr_context_descriptor_update(ctx, engine, ce);
1320
1321         GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
1322
1323         ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
1324
1325         __execlists_update_reg_state(engine, ce);
1326
1327         ce->state->obj->pin_global++;
1328         i915_gem_context_get(ctx);
1329         return ce;
1330
1331 unpin_ring:
1332         intel_ring_unpin(ce->ring);
1333 unpin_map:
1334         i915_gem_object_unpin_map(ce->state->obj);
1335 unpin_vma:
1336         __i915_vma_unpin(ce->state);
1337 err:
1338         ce->pin_count = 0;
1339         return ERR_PTR(ret);
1340 }
1341
1342 static const struct intel_context_ops execlists_context_ops = {
1343         .unpin = execlists_context_unpin,
1344         .destroy = execlists_context_destroy,
1345 };
1346
1347 static struct intel_context *
1348 execlists_context_pin(struct intel_engine_cs *engine,
1349                       struct i915_gem_context *ctx)
1350 {
1351         struct intel_context *ce = to_intel_context(ctx, engine);
1352
1353         lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1354         GEM_BUG_ON(!ctx->ppgtt);
1355
1356         if (likely(ce->pin_count++))
1357                 return ce;
1358         GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
1359
1360         ce->ops = &execlists_context_ops;
1361
1362         return __execlists_context_pin(engine, ctx, ce);
1363 }
1364
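/*
 * Mark the request as started by writing seqno-1 to the timeline HWSP,
 * preceded by an arbitration point so we may be preempted before starting.
 */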
1365 static int gen8_emit_init_breadcrumb(struct i915_request *rq)
1366 {
1367         u32 *cs;
1368
1369         GEM_BUG_ON(!rq->timeline->has_initial_breadcrumb);
1370
1371         cs = intel_ring_begin(rq, 6);
1372         if (IS_ERR(cs))
1373                 return PTR_ERR(cs);
1374
1375         /*
1376          * Check if we have been preempted before we even get started.
1377          *
1378          * After this point i915_request_started() reports true, even if
1379          * we get preempted and so are no longer running.
1380          */
1381         *cs++ = MI_ARB_CHECK;
1382         *cs++ = MI_NOOP;
1383
1384         *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
1385         *cs++ = rq->timeline->hwsp_offset;
1386         *cs++ = 0;
1387         *cs++ = rq->fence.seqno - 1;
1388
1389         intel_ring_advance(rq, cs);
1390         return 0;
1391 }
1392
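/* Reload the page-directory pointers for a 3-level (32b) ppgtt via LRI, bracketed by flushes. */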
1393 static int emit_pdps(struct i915_request *rq)
1394 {
1395         const struct intel_engine_cs * const engine = rq->engine;
1396         struct i915_hw_ppgtt * const ppgtt = rq->gem_context->ppgtt;
1397         int err, i;
1398         u32 *cs;
1399
1400         GEM_BUG_ON(intel_vgpu_active(rq->i915));
1401
1402         /*
1403          * Beware ye of the dragons, this sequence is magic!
1404          *
1405          * Small changes to this sequence can cause anything from
1406          * GPU hangs to forcewake errors and machine lockups!
1407          */
1408
1409         /* Flush any residual operations from the context load */
1410         err = engine->emit_flush(rq, EMIT_FLUSH);
1411         if (err)
1412                 return err;
1413
1414         /* Magic required to prevent forcewake errors! */
1415         err = engine->emit_flush(rq, EMIT_INVALIDATE);
1416         if (err)
1417                 return err;
1418
1419         cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1420         if (IS_ERR(cs))
1421                 return PTR_ERR(cs);
1422
1423         /* Ensure the LRI have landed before we invalidate & continue */
1424         *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
1425         for (i = GEN8_3LVL_PDPES; i--; ) {
1426                 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1427
1428                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
1429                 *cs++ = upper_32_bits(pd_daddr);
1430                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
1431                 *cs++ = lower_32_bits(pd_daddr);
1432         }
1433         *cs++ = MI_NOOP;
1434
1435         intel_ring_advance(rq, cs);
1436
1437         /* Be doubly sure the LRI have landed before proceeding */
1438         err = engine->emit_flush(rq, EMIT_FLUSH);
1439         if (err)
1440                 return err;
1441
1442         /* Re-invalidate the TLB for luck */
1443         return engine->emit_flush(rq, EMIT_INVALIDATE);
1444 }
1445
1446 static int execlists_request_alloc(struct i915_request *request)
1447 {
1448         int ret;
1449
1450         GEM_BUG_ON(!request->hw_context->pin_count);
1451
1452         /*
1453          * Flush enough space to reduce the likelihood of waiting after
1454          * we start building the request - in which case we will just
1455          * have to repeat work.
1456          */
1457         request->reserved_space += EXECLISTS_REQUEST_SIZE;
1458
1459         /*
1460          * Note that after this point, we have committed to using
1461          * this request as it is being used to both track the
1462          * state of engine initialisation and liveness of the
1463          * golden renderstate above. Think twice before you try
1464          * to cancel/unwind this request now.
1465          */
1466
1467         /* Unconditionally invalidate GPU caches and TLBs. */
1468         if (i915_vm_is_48bit(&request->gem_context->ppgtt->vm))
1469                 ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
1470         else
1471                 ret = emit_pdps(request);
1472         if (ret)
1473                 return ret;
1474
1475         request->reserved_space -= EXECLISTS_REQUEST_SIZE;
1476         return 0;
1477 }
1478
1479 /*
1480  * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
1481  * PIPE_CONTROL instruction. This is required for the flush to happen correctly
1482  * but there is a slight complication as this is applied in WA batch where the
1483  * values are only initialized once so we cannot take register value at the
1484  * beginning and reuse it further; hence we save its value to memory, upload a
1485  * constant value with bit21 set and then we restore it back with the saved value.
1486  * To simplify the WA, a constant value is formed by using the default value
1487  * of this register. This shouldn't be a problem because we are only modifying
1488  * it for a short period and this batch is non-preemptible. We can of course
1489  * use additional instructions that read the actual value of the register
1490  * at that time and set our bit of interest but it makes the WA complicated.
1491  *
1492  * This WA is also required for Gen9 so extracting as a function avoids
1493  * code duplication.
1494  */
1495 static u32 *
1496 gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
1497 {
1498         /* NB no one else is allowed to scribble over scratch + 256! */
1499         *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
1500         *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1501         *batch++ = i915_scratch_offset(engine->i915) + 256;
1502         *batch++ = 0;
1503
1504         *batch++ = MI_LOAD_REGISTER_IMM(1);
1505         *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1506         *batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;
1507
1508         batch = gen8_emit_pipe_control(batch,
1509                                        PIPE_CONTROL_CS_STALL |
1510                                        PIPE_CONTROL_DC_FLUSH_ENABLE,
1511                                        0);
1512
1513         *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
1514         *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1515         *batch++ = i915_scratch_offset(engine->i915) + 256;
1516         *batch++ = 0;
1517
1518         return batch;
1519 }
1520
1521 /*
1522  * Typically we only have one indirect_ctx and per_ctx batch buffer which are
1523  * initialized at the beginning and shared across all contexts but this field
1524  * initialized at the beginning and shared across all contexts, but this field
1525  * helps us to have multiple batches at different offsets and select them based
1526  * on some criteria. At the moment this batch always starts at the beginning of the page
1527  *
1528  * The number of WA applied is not known at the beginning; we use this field
1529  * to return the number of DWORDS written.
1530  *
1531  * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
1532  * so it adds NOOPs as padding to make it cacheline aligned.
1533  * MI_BATCH_BUFFER_END will be added to the per_ctx batch and both of them together
1534  * make a complete batch buffer.
1535  */
1536 static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
1537 {
1538         /* WaDisableCtxRestoreArbitration:bdw,chv */
1539         *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1540
1541         /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1542         if (IS_BROADWELL(engine->i915))
1543                 batch = gen8_emit_flush_coherentl3_wa(engine, batch);
1544
1545         /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1546         /* Actual scratch location is at 128 bytes offset */
1547         batch = gen8_emit_pipe_control(batch,
1548                                        PIPE_CONTROL_FLUSH_L3 |
1549                                        PIPE_CONTROL_GLOBAL_GTT_IVB |
1550                                        PIPE_CONTROL_CS_STALL |
1551                                        PIPE_CONTROL_QW_WRITE,
1552                                        i915_scratch_offset(engine->i915) +
1553                                        2 * CACHELINE_BYTES);
1554
1555         *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1556
1557         /* Pad to end of cacheline */
1558         while ((unsigned long)batch % CACHELINE_BYTES)
1559                 *batch++ = MI_NOOP;
1560
1561         /*
1562          * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
1563          * execution depends on the length specified in terms of cache lines
1564          * in the register CTX_RCS_INDIRECT_CTX
1565          */
1566
1567         return batch;
1568 }
1569
1570 struct lri {
1571         i915_reg_t reg;
1572         u32 value;
1573 };
1574
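/* Emit a single MI_LOAD_REGISTER_IMM packet covering an array of (reg, value) pairs. */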
1575 static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count)
1576 {
1577         GEM_BUG_ON(!count || count > 63);
1578
1579         *batch++ = MI_LOAD_REGISTER_IMM(count);
1580         do {
1581                 *batch++ = i915_mmio_reg_offset(lri->reg);
1582                 *batch++ = lri->value;
1583         } while (lri++, --count);
1584         *batch++ = MI_NOOP;
1585
1586         return batch;
1587 }
1588
1589 static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
1590 {
1591         static const struct lri lri[] = {
1592                 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
1593                 {
1594                         COMMON_SLICE_CHICKEN2,
1595                         __MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE,
1596                                        0),
1597                 },
1598
1599                 /* BSpec: 11391 */
1600                 {
1601                         FF_SLICE_CHICKEN,
1602                         __MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX,
1603                                        FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
1604                 },
1605
1606                 /* BSpec: 11299 */
1607                 {
1608                         _3D_CHICKEN3,
1609                         __MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX,
1610                                        _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
1611                 }
1612         };
1613
1614         *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1615
1616         /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
1617         batch = gen8_emit_flush_coherentl3_wa(engine, batch);
1618
1619         batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
1620
1621         /* WaMediaPoolStateCmdInWABB:bxt,glk */
1622         if (HAS_POOLED_EU(engine->i915)) {
1623                 /*
1624                  * EU pool configuration is set up along with the golden context
1625                  * during context initialization. This value depends on
1626                  * device type (2x6 or 3x6) and needs to be updated based
1627                  * on which subslice is disabled especially for 2x6
1628                  * devices, however it is safe to load default
1629                  * configuration of 3x6 device instead of masking off
1630                  * corresponding bits because HW ignores bits of a disabled
1631                  * subslice and drops down to appropriate config. Please
1632                  * see render_state_setup() in i915_gem_render_state.c for
1633                  * possible configurations, to avoid duplication they are
1634                  * not shown here again.
1635                  */
1636                 *batch++ = GEN9_MEDIA_POOL_STATE;
1637                 *batch++ = GEN9_MEDIA_POOL_ENABLE;
1638                 *batch++ = 0x00777000;
1639                 *batch++ = 0;
1640                 *batch++ = 0;
1641                 *batch++ = 0;
1642         }
1643
1644         *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1645
1646         /* Pad to end of cacheline */
1647         while ((unsigned long)batch % CACHELINE_BYTES)
1648                 *batch++ = MI_NOOP;
1649
1650         return batch;
1651 }
1652
1653 static u32 *
1654 gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
1655 {
1656         int i;
1657
1658         /*
1659          * WaPipeControlBefore3DStateSamplePattern: cnl
1660          *
1661          * Ensure the engine is idle prior to programming a
1662          * 3DSTATE_SAMPLE_PATTERN during a context restore.
1663          */
1664         batch = gen8_emit_pipe_control(batch,
1665                                        PIPE_CONTROL_CS_STALL,
1666                                        0);
1667         /*
1668          * WaPipeControlBefore3DStateSamplePattern says we need 4 dwords for
1669          * the PIPE_CONTROL followed by 12 dwords of 0x0, so 16 dwords in
1670          * total. However, a PIPE_CONTROL is 6 dwords long, not 4, which is
1671          * confusing. Since gen8_emit_pipe_control() already advances the
1672          * batch by 6 dwords, we advance the other 10 here, completing a
1673          * cacheline. It's not clear if the workaround requires this padding
1674          * before other commands, or if it's just the regular padding we would
1675          * already have for the workaround bb, so leave it here for now.
1676          */
1677         for (i = 0; i < 10; i++)
1678                 *batch++ = MI_NOOP;
1679
1680         /* Pad to end of cacheline */
1681         while ((unsigned long)batch % CACHELINE_BYTES)
1682                 *batch++ = MI_NOOP;
1683
1684         return batch;
1685 }
1686
1687 #define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE)
1688
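/* Allocate a single page and pin it high in the GGTT to hold the workaround batches. */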
1689 static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
1690 {
1691         struct drm_i915_gem_object *obj;
1692         struct i915_vma *vma;
1693         int err;
1694
1695         obj = i915_gem_object_create(engine->i915, CTX_WA_BB_OBJ_SIZE);
1696         if (IS_ERR(obj))
1697                 return PTR_ERR(obj);
1698
1699         vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
1700         if (IS_ERR(vma)) {
1701                 err = PTR_ERR(vma);
1702                 goto err;
1703         }
1704
1705         err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
1706         if (err)
1707                 goto err;
1708
1709         engine->wa_ctx.vma = vma;
1710         return 0;
1711
1712 err:
1713         i915_gem_object_put(obj);
1714         return err;
1715 }
1716
1717 static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
1718 {
1719         i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
1720 }
1721
1722 typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
1723
1724 static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1725 {
1726         struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1727         struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
1728                                             &wa_ctx->per_ctx };
1729         wa_bb_func_t wa_bb_fn[2];
1730         struct page *page;
1731         void *batch, *batch_ptr;
1732         unsigned int i;
1733         int ret;
1734
1735         if (GEM_DEBUG_WARN_ON(engine->id != RCS))
1736                 return -EINVAL;
1737
1738         switch (INTEL_GEN(engine->i915)) {
1739         case 11:
1740                 return 0;
1741         case 10:
1742                 wa_bb_fn[0] = gen10_init_indirectctx_bb;
1743                 wa_bb_fn[1] = NULL;
1744                 break;
1745         case 9:
1746                 wa_bb_fn[0] = gen9_init_indirectctx_bb;
1747                 wa_bb_fn[1] = NULL;
1748                 break;
1749         case 8:
1750                 wa_bb_fn[0] = gen8_init_indirectctx_bb;
1751                 wa_bb_fn[1] = NULL;
1752                 break;
1753         default:
1754                 MISSING_CASE(INTEL_GEN(engine->i915));
1755                 return 0;
1756         }
1757
1758         ret = lrc_setup_wa_ctx(engine);
1759         if (ret) {
1760                 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1761                 return ret;
1762         }
1763
1764         page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
1765         batch = batch_ptr = kmap_atomic(page);
1766
1767         /*
1768          * Emit the two workaround batch buffers, recording the offset from the
1769          * start of the workaround batch buffer object for each and their
1770          * respective sizes.
1771          */
1772         for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
1773                 wa_bb[i]->offset = batch_ptr - batch;
1774                 if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
1775                                                   CACHELINE_BYTES))) {
1776                         ret = -EINVAL;
1777                         break;
1778                 }
1779                 if (wa_bb_fn[i])
1780                         batch_ptr = wa_bb_fn[i](engine, batch_ptr);
1781                 wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
1782         }
1783
1784         BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
1785
1786         kunmap_atomic(batch);
1787         if (ret)
1788                 lrc_destroy_wa_ctx(engine);
1789
1790         return ret;
1791 }
1792
1793 static void enable_execlists(struct intel_engine_cs *engine)
1794 {
1795         struct drm_i915_private *dev_priv = engine->i915;
1796
1797         intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
1798
1799         /*
1800          * Make sure we're not enabling the new 12-deep CSB
1801          * FIFO as that requires a slightly updated handling
1802          * in the ctx switch irq. Since we're currently
1803          * using only 2 elements of the enhanced execlists, the
1804          * deeper FIFO is not needed and it's not worth adding
1805          * more statements to the irq handler to support it.
1806          */
1807         if (INTEL_GEN(dev_priv) >= 11)
1808                 I915_WRITE(RING_MODE_GEN7(engine),
1809                            _MASKED_BIT_DISABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
1810         else
1811                 I915_WRITE(RING_MODE_GEN7(engine),
1812                            _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1813
1814         I915_WRITE(RING_MI_MODE(engine->mmio_base),
1815                    _MASKED_BIT_DISABLE(STOP_RING));
1816
1817         I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1818                    i915_ggtt_offset(engine->status_page.vma));
1819         POSTING_READ(RING_HWS_PGA(engine->mmio_base));
1820 }
1821
1822 static bool unexpected_starting_state(struct intel_engine_cs *engine)
1823 {
1824         struct drm_i915_private *dev_priv = engine->i915;
1825         bool unexpected = false;
1826
1827         if (I915_READ(RING_MI_MODE(engine->mmio_base)) & STOP_RING) {
1828                 DRM_DEBUG_DRIVER("STOP_RING still set in RING_MI_MODE\n");
1829                 unexpected = true;
1830         }
1831
1832         return unexpected;
1833 }
1834
1835 static int gen8_init_common_ring(struct intel_engine_cs *engine)
1836 {
1837         intel_engine_apply_workarounds(engine);
1838         intel_engine_apply_whitelist(engine);
1839
1840         intel_mocs_init_engine(engine);
1841
1842         intel_engine_reset_breadcrumbs(engine);
1843
1844         if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
1845                 struct drm_printer p = drm_debug_printer(__func__);
1846
1847                 intel_engine_dump(engine, &p, NULL);
1848         }
1849
1850         enable_execlists(engine);
1851
1852         return 0;
1853 }
1854
1855 static void execlists_reset_prepare(struct intel_engine_cs *engine)
1856 {
1857         struct intel_engine_execlists * const execlists = &engine->execlists;
1858         unsigned long flags;
1859
1860         GEM_TRACE("%s: depth<-%d\n", engine->name,
1861                   atomic_read(&execlists->tasklet.count));
1862
1863         /*
1864          * Prevent request submission to the hardware until we have
1865          * completed the reset in i915_gem_reset_finish(). If a request
1866          * is completed by one engine, it may then queue a request
1867          * to a second via its execlists->tasklet *just* as we are
1868          * calling engine->init_hw() and also writing the ELSP.
1869          * Turning off the execlists->tasklet until the reset is over
1870          * prevents the race.
1871          */
1872         __tasklet_disable_sync_once(&execlists->tasklet);
1873         GEM_BUG_ON(!reset_in_progress(execlists));
1874
1875         /* And flush any current direct submission. */
1876         spin_lock_irqsave(&engine->timeline.lock, flags);
1877         process_csb(engine); /* drain preemption events */
1878         spin_unlock_irqrestore(&engine->timeline.lock, flags);
1879 }
1880
1881 static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
1882 {
1883         struct intel_engine_execlists * const execlists = &engine->execlists;
1884         struct i915_request *rq;
1885         unsigned long flags;
1886         u32 *regs;
1887
1888         spin_lock_irqsave(&engine->timeline.lock, flags);
1889
1890         /*
1891          * Catch up with any missed context-switch interrupts.
1892          *
1893          * Ideally we would just read the remaining CSB entries now that we
1894          * know the gpu is idle. However, the CSB registers are sometimes^W
1895          * often trashed across a GPU reset! Instead we have to rely on
1896          * guessing the missed context-switch events by looking at what
1897          * requests were completed.
1898          */
1899         execlists_cancel_port_requests(execlists);
1900
1901         /* Push back any incomplete requests for replay after the reset. */
1902         rq = __unwind_incomplete_requests(engine);
1903
1904         /* Following the reset, we need to reload the CSB read/write pointers */
1905         reset_csb_pointers(&engine->execlists);
1906
1907         GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
1908                   engine->name,
1909                   rq ? rq->global_seqno : 0,
1910                   intel_engine_get_seqno(engine),
1911                   yesno(stalled));
1912         if (!rq)
1913                 goto out_unlock;
1914
1915         /*
1916          * If the request was innocent, we leave the request in the ELSP
1917          * and will try to replay it on restarting. The context image may
1918          * have been corrupted by the reset, in which case we may have
1919          * to service a new GPU hang, but more likely we can continue on
1920          * without impact.
1921          *
1922          * If the request was guilty, we presume the context is corrupt
1923          * and have to at least restore the RING register in the context
1924          * image back to the expected values to skip over the guilty request.
1925          */
1926         i915_reset_request(rq, stalled);
1927         if (!stalled)
1928                 goto out_unlock;
1929
1930         /*
1931          * We want a simple context + ring to execute the breadcrumb update.
1932          * We cannot rely on the context being intact across the GPU hang,
1933          * so clear it and rebuild just what we need for the breadcrumb.
1934          * All pending requests for this context will be zapped, and any
1935          * future request will be after userspace has had the opportunity
1936          * to recreate its own state.
1937          */
1938         regs = rq->hw_context->lrc_reg_state;
1939         if (engine->pinned_default_state) {
1940                 memcpy(regs, /* skip restoring the vanilla PPHWSP */
1941                        engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
1942                        engine->context_size - PAGE_SIZE);
1943         }
1944
1945         /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
1946         rq->ring->head = intel_ring_wrap(rq->ring, rq->postfix);
1947         intel_ring_update_space(rq->ring);
1948
1949         execlists_init_reg_state(regs, rq->gem_context, engine, rq->ring);
1950         __execlists_update_reg_state(engine, rq->hw_context);
1951
1952 out_unlock:
1953         spin_unlock_irqrestore(&engine->timeline.lock, flags);
1954 }
1955
1956 static void execlists_reset_finish(struct intel_engine_cs *engine)
1957 {
1958         struct intel_engine_execlists * const execlists = &engine->execlists;
1959
1960         /*
1961          * After a GPU reset, we may have requests to replay. Do so now while
1962          * we still have the forcewake to be sure that the GPU is not allowed
1963          * to sleep before we restart and reload a context.
1964          *
1965          */
1966         GEM_BUG_ON(!reset_in_progress(execlists));
1967         if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
1968                 execlists->tasklet.func(execlists->tasklet.data);
1969
1970         tasklet_enable(&execlists->tasklet);
1971         GEM_TRACE("%s: depth->%d\n", engine->name,
1972                   atomic_read(&execlists->tasklet.count));
1973 }
1974
1975 static int gen8_emit_bb_start(struct i915_request *rq,
1976                               u64 offset, u32 len,
1977                               const unsigned int flags)
1978 {
1979         u32 *cs;
1980
1981         cs = intel_ring_begin(rq, 6);
1982         if (IS_ERR(cs))
1983                 return PTR_ERR(cs);
1984
1985         /*
1986          * WaDisableCtxRestoreArbitration:bdw,chv
1987          *
1988          * We don't need to perform MI_ARB_ENABLE as often as we do (in
1989          * particular all the gen that do not need the w/a at all!), if we
1990          * took care to make sure that on every switch into this context
1991          * (both ordinary and for preemption) that arbitration was enabled
1992          * we would be fine. However, there doesn't seem to be a downside to
1993          * being paranoid and making sure it is set before each batch and
1994          * every context-switch.
1995          *
1996          * Note that if we fail to enable arbitration before the request
1997          * is complete, then we do not see the context-switch interrupt and
1998          * the engine hangs (with RING_HEAD == RING_TAIL).
1999          *
2000          * That satisfies both the GPGPU w/a and our heavy-handed paranoia.
2001          */
2002         *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
2003
2004         /* FIXME(BDW): Address space and security selectors. */
2005         *cs++ = MI_BATCH_BUFFER_START_GEN8 |
2006                 (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
2007         *cs++ = lower_32_bits(offset);
2008         *cs++ = upper_32_bits(offset);
2009
2010         *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
2011         *cs++ = MI_NOOP;
2012
2013         intel_ring_advance(rq, cs);
2014
2015         return 0;
2016 }
2017
2018 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
2019 {
2020         struct drm_i915_private *dev_priv = engine->i915;
2021         I915_WRITE_IMR(engine,
2022                        ~(engine->irq_enable_mask | engine->irq_keep_mask));
2023         POSTING_READ_FW(RING_IMR(engine->mmio_base));
2024 }
2025
2026 static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
2027 {
2028         struct drm_i915_private *dev_priv = engine->i915;
2029         I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
2030 }
2031
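/* MI_FLUSH_DW with a post-sync write, used by the non-render engines; optionally invalidates TLBs. */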
2032 static int gen8_emit_flush(struct i915_request *request, u32 mode)
2033 {
2034         u32 cmd, *cs;
2035
2036         cs = intel_ring_begin(request, 4);
2037         if (IS_ERR(cs))
2038                 return PTR_ERR(cs);
2039
2040         cmd = MI_FLUSH_DW + 1;
2041
2042         /* We always require a command barrier so that subsequent
2043          * commands, such as breadcrumb interrupts, are strictly ordered
2044          * wrt the contents of the write cache being flushed to memory
2045          * (and thus being coherent from the CPU).
2046          */
2047         cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2048
2049         if (mode & EMIT_INVALIDATE) {
2050                 cmd |= MI_INVALIDATE_TLB;
2051                 if (request->engine->class == VIDEO_DECODE_CLASS)
2052                         cmd |= MI_INVALIDATE_BSD;
2053         }
2054
2055         *cs++ = cmd;
2056         *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
2057         *cs++ = 0; /* upper addr */
2058         *cs++ = 0; /* value */
2059         intel_ring_advance(request, cs);
2060
2061         return 0;
2062 }
2063
2064 static int gen8_emit_flush_render(struct i915_request *request,
2065                                   u32 mode)
2066 {
2067         struct intel_engine_cs *engine = request->engine;
2068         u32 scratch_addr =
2069                 i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
2070         bool vf_flush_wa = false, dc_flush_wa = false;
2071         u32 *cs, flags = 0;
2072         int len;
2073
2074         flags |= PIPE_CONTROL_CS_STALL;
2075
2076         if (mode & EMIT_FLUSH) {
2077                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
2078                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
2079                 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
2080                 flags |= PIPE_CONTROL_FLUSH_ENABLE;
2081         }
2082
2083         if (mode & EMIT_INVALIDATE) {
2084                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
2085                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
2086                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
2087                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
2088                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
2089                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
2090                 flags |= PIPE_CONTROL_QW_WRITE;
2091                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
2092
2093                 /*
2094                  * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
2095                  * pipe control.
2096                  */
2097                 if (IS_GEN(request->i915, 9))
2098                         vf_flush_wa = true;
2099
2100                 /* WaForGAMHang:kbl */
2101                 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
2102                         dc_flush_wa = true;
2103         }
2104
2105         len = 6;
2106
2107         if (vf_flush_wa)
2108                 len += 6;
2109
2110         if (dc_flush_wa)
2111                 len += 12;
2112
2113         cs = intel_ring_begin(request, len);
2114         if (IS_ERR(cs))
2115                 return PTR_ERR(cs);
2116
2117         if (vf_flush_wa)
2118                 cs = gen8_emit_pipe_control(cs, 0, 0);
2119
2120         if (dc_flush_wa)
2121                 cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
2122                                             0);
2123
2124         cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
2125
2126         if (dc_flush_wa)
2127                 cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
2128
2129         intel_ring_advance(request, cs);
2130
2131         return 0;
2132 }
2133
2134 /*
2135  * Reserve space for 2 NOOPs at the end of each request to be
2136  * used as a workaround for not being allowed to do lite
2137  * restore with HEAD==TAIL (WaIdleLiteRestore).
2138  */
2139 static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
2140 {
2141         /* Ensure there's always at least one preemption point per-request. */
2142         *cs++ = MI_ARB_CHECK;
2143         *cs++ = MI_NOOP;
2144         request->wa_tail = intel_ring_offset(request, cs);
2145
2146         return cs;
2147 }
2148
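/* Write the timeline and global breadcrumbs, then raise the user interrupt. */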
2149 static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
2150 {
2151         /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
2152         BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
2153
2154         cs = gen8_emit_ggtt_write(cs,
2155                                   request->fence.seqno,
2156                                   request->timeline->hwsp_offset);
2157
2158         cs = gen8_emit_ggtt_write(cs,
2159                                   request->global_seqno,
2160                                   intel_hws_seqno_address(request->engine));
2161
2162         *cs++ = MI_USER_INTERRUPT;
2163         *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
2164
2165         request->tail = intel_ring_offset(request, cs);
2166         assert_ring_tail_valid(request->ring, request->tail);
2167
2168         return gen8_emit_wa_tail(request, cs);
2169 }
2170
2171 static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
2172 {
2173         cs = gen8_emit_ggtt_write_rcs(cs,
2174                                       request->fence.seqno,
2175                                       request->timeline->hwsp_offset,
2176                                       PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
2177                                       PIPE_CONTROL_DEPTH_CACHE_FLUSH |
2178                                       PIPE_CONTROL_DC_FLUSH_ENABLE |
2179                                       PIPE_CONTROL_FLUSH_ENABLE |
2180                                       PIPE_CONTROL_CS_STALL);
2181
2182         cs = gen8_emit_ggtt_write_rcs(cs,
2183                                       request->global_seqno,
2184                                       intel_hws_seqno_address(request->engine),
2185                                       PIPE_CONTROL_CS_STALL);
2186
2187         *cs++ = MI_USER_INTERRUPT;
2188         *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
2189
2190         request->tail = intel_ring_offset(request, cs);
2191         assert_ring_tail_valid(request->ring, request->tail);
2192
2193         return gen8_emit_wa_tail(request, cs);
2194 }
2195
2196 static int gen8_init_rcs_context(struct i915_request *rq)
2197 {
2198         int ret;
2199
2200         ret = intel_engine_emit_ctx_wa(rq);
2201         if (ret)
2202                 return ret;
2203
2204         ret = intel_rcs_context_init_mocs(rq);
2205         /*
2206          * Failing to program the MOCS is non-fatal. The system will not
2207          * run at peak performance, so generate an error and carry on.
2208          */
2209         if (ret)
2210                 DRM_ERROR("MOCS failed to program: expect performance issues.\n");
2211
2212         return i915_gem_render_state_emit(rq);
2213 }
2214
2215 /**
2216  * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
2217  * @engine: Engine Command Streamer.
2218  */
2219 void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
2220 {
2221         struct drm_i915_private *dev_priv;
2222
2223         /*
2224          * Tasklet cannot be active at this point due to intel_mark_active/idle
2225          * so this is just for documentation.
2226          */
2227         if (WARN_ON(test_bit(TASKLET_STATE_SCHED,
2228                              &engine->execlists.tasklet.state)))
2229                 tasklet_kill(&engine->execlists.tasklet);
2230
2231         dev_priv = engine->i915;
2232
2233         if (engine->buffer) {
2234                 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
2235         }
2236
2237         if (engine->cleanup)
2238                 engine->cleanup(engine);
2239
2240         intel_engine_cleanup_common(engine);
2241
2242         lrc_destroy_wa_ctx(engine);
2243
2244         engine->i915 = NULL;
2245         dev_priv->engine[engine->id] = NULL;
2246         kfree(engine);
2247 }
2248
2249 void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
2250 {
2251         engine->submit_request = execlists_submit_request;
2252         engine->cancel_requests = execlists_cancel_requests;
2253         engine->schedule = i915_schedule;
2254         engine->execlists.tasklet.func = execlists_submission_tasklet;
2255
2256         engine->reset.prepare = execlists_reset_prepare;
2257
2258         engine->park = NULL;
2259         engine->unpark = NULL;
2260
2261         engine->flags |= I915_ENGINE_SUPPORTS_STATS;
2262         if (engine->i915->preempt_context)
2263                 engine->flags |= I915_ENGINE_HAS_PREEMPTION;
2264
2265         engine->i915->caps.scheduler =
2266                 I915_SCHEDULER_CAP_ENABLED |
2267                 I915_SCHEDULER_CAP_PRIORITY;
2268         if (intel_engine_has_preemption(engine))
2269                 engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
2270 }
2271
2272 static void
2273 logical_ring_default_vfuncs(struct intel_engine_cs *engine)
2274 {
2275         /* Default vfuncs which can be overridden by each engine. */
2276         engine->init_hw = gen8_init_common_ring;
2277
2278         engine->reset.prepare = execlists_reset_prepare;
2279         engine->reset.reset = execlists_reset;
2280         engine->reset.finish = execlists_reset_finish;
2281
2282         engine->context_pin = execlists_context_pin;
2283         engine->request_alloc = execlists_request_alloc;
2284
2285         engine->emit_flush = gen8_emit_flush;
2286         engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
2287         engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb;
2288
2289         engine->set_default_submission = intel_execlists_set_default_submission;
2290
2291         if (INTEL_GEN(engine->i915) < 11) {
2292                 engine->irq_enable = gen8_logical_ring_enable_irq;
2293                 engine->irq_disable = gen8_logical_ring_disable_irq;
2294         } else {
2295                 /*
2296                  * TODO: On Gen11 interrupt masks need to be clear
2297                  * to allow C6 entry. Keep interrupts enabled
2298                  * and take the hit of generating extra interrupts
2299                  * until a more refined solution exists.
2300                  */
2301         }
2302         engine->emit_bb_start = gen8_emit_bb_start;
2303 }
2304
2305 static inline void
2306 logical_ring_default_irqs(struct intel_engine_cs *engine)
2307 {
2308         unsigned int shift = 0;
2309
2310         if (INTEL_GEN(engine->i915) < 11) {
2311                 const u8 irq_shifts[] = {
2312                         [RCS]  = GEN8_RCS_IRQ_SHIFT,
2313                         [BCS]  = GEN8_BCS_IRQ_SHIFT,
2314                         [VCS]  = GEN8_VCS1_IRQ_SHIFT,
2315                         [VCS2] = GEN8_VCS2_IRQ_SHIFT,
2316                         [VECS] = GEN8_VECS_IRQ_SHIFT,
2317                 };
2318
2319                 shift = irq_shifts[engine->id];
2320         }
2321
2322         engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
2323         engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
2324 }
2325
2326 static int
2327 logical_ring_setup(struct intel_engine_cs *engine)
2328 {
2329         int err;
2330
2331         err = intel_engine_setup_common(engine);
2332         if (err)
2333                 return err;
2334
2335         /* Intentionally left blank. */
2336         engine->buffer = NULL;
2337
2338         tasklet_init(&engine->execlists.tasklet,
2339                      execlists_submission_tasklet, (unsigned long)engine);
2340
2341         logical_ring_default_vfuncs(engine);
2342         logical_ring_default_irqs(engine);
2343
2344         return 0;
2345 }
2346
2347 static int logical_ring_init(struct intel_engine_cs *engine)
2348 {
2349         struct drm_i915_private *i915 = engine->i915;
2350         struct intel_engine_execlists * const execlists = &engine->execlists;
2351         int ret;
2352
2353         ret = intel_engine_init_common(engine);
2354         if (ret)
2355                 return ret;
2356
2357         intel_engine_init_workarounds(engine);
2358
2359         if (HAS_LOGICAL_RING_ELSQ(i915)) {
2360                 execlists->submit_reg = i915->regs +
2361                         i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
2362                 execlists->ctrl_reg = i915->regs +
2363                         i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
2364         } else {
2365                 execlists->submit_reg = i915->regs +
2366                         i915_mmio_reg_offset(RING_ELSP(engine));
2367         }
2368
2369         execlists->preempt_complete_status = ~0u;
2370         if (i915->preempt_context) {
2371                 struct intel_context *ce =
2372                         to_intel_context(i915->preempt_context, engine);
2373
2374                 execlists->preempt_complete_status =
2375                         upper_32_bits(ce->lrc_desc);
2376         }
2377
2378         execlists->csb_status =
2379                 &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
2380
2381         execlists->csb_write =
2382                 &engine->status_page.addr[intel_hws_csb_write_index(i915)];
2383
2384         reset_csb_pointers(execlists);
2385
2386         return 0;
2387 }
2388
2389 int logical_render_ring_init(struct intel_engine_cs *engine)
2390 {
2391         int ret;
2392
2393         ret = logical_ring_setup(engine);
2394         if (ret)
2395                 return ret;
2396
2397         /* Override some for render ring. */
2398         engine->init_context = gen8_init_rcs_context;
2399         engine->emit_flush = gen8_emit_flush_render;
2400         engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs;
2401
2402         ret = logical_ring_init(engine);
2403         if (ret)
2404                 return ret;
2405
2406         ret = intel_init_workaround_bb(engine);
2407         if (ret) {
2408                 /*
2409                  * We continue even if we fail to initialize WA batch
2410                  * We continue even if we fail to initialize the WA batch
2411                  * because we only expect rare glitches, nothing
2412                  * critical that prevents us from using the GPU
2413                 DRM_ERROR("WA batch buffer initialization failed: %d\n",
2414                           ret);
2415         }
2416
2417         intel_engine_init_whitelist(engine);
2418
2419         return 0;
2420 }
2421
2422 int logical_xcs_ring_init(struct intel_engine_cs *engine)
2423 {
2424         int err;
2425
2426         err = logical_ring_setup(engine);
2427         if (err)
2428                 return err;
2429
2430         return logical_ring_init(engine);
2431 }
2432
2433 u32 gen8_make_rpcs(struct drm_i915_private *i915, struct intel_sseu *req_sseu)
2434 {
2435         const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
2436         bool subslice_pg = sseu->has_subslice_pg;
2437         struct intel_sseu ctx_sseu;
2438         u8 slices, subslices;
2439         u32 rpcs = 0;
2440
2441         /*
2442          * No explicit RPCS request is needed to ensure full
2443          * slice/subslice/EU enablement prior to Gen9.
2444          */
2445         if (INTEL_GEN(i915) < 9)
2446                 return 0;
2447
2448         /*
2449          * If i915/perf is active, we want a stable powergating configuration
2450          * on the system.
2451          *
2452          * We could choose full enablement, but on ICL we know there are use
2453          * cases which disable slices for functional, apart from performance,
2454          * reasons. So in this case we select a known stable subset.
2455          */
2456         if (!i915->perf.oa.exclusive_stream) {
2457                 ctx_sseu = *req_sseu;
2458         } else {
2459                 ctx_sseu = intel_device_default_sseu(i915);
2460
2461                 if (IS_GEN(i915, 11)) {
2462                         /*
2463                          * We only need subslice count so it doesn't matter
2464                          * which ones we select - just turn off low bits in the
2465                          * amount of half of all available subslices per slice.
2466                          */
2467                         ctx_sseu.subslice_mask =
2468                                 ~(~0 << (hweight8(ctx_sseu.subslice_mask) / 2));
2469                         ctx_sseu.slice_mask = 0x1;
2470                 }
2471         }
2472
2473         slices = hweight8(ctx_sseu.slice_mask);
2474         subslices = hweight8(ctx_sseu.subslice_mask);
2475
2476         /*
2477          * Since the SScount bitfield in GEN8_R_PWR_CLK_STATE is only three bits
2478          * wide and Icelake has up to eight subslices, special programming is
2479          * needed in order to correctly enable all subslices.
2480          *
2481          * According to documentation software must consider the configuration
2482          * as 2x4x8 and hardware will translate this to 1x8x8.
2483          *
2484          * Furthermore, even though SScount is three bits, the maximum documented
2485          * value for it is four. From this some rules/restrictions follow:
2486          *
2487          * 1.
2488          * If enabled subslice count is greater than four, two whole slices must
2489          * be enabled instead.
2490          *
2491          * 2.
2492          * When more than one slice is enabled, hardware ignores the subslice
2493          * count altogether.
2494          *
2495          * From these restrictions it follows that it is not possible to enable
2496          * a subslice count between the SScount maximum of four and the
2497          * maximum number available on a particular SKU. Either all
2498          * subslices are enabled, or a count between one and four on the first
2499          * slice.
2500          */
2501         if (IS_GEN(i915, 11) &&
2502             slices == 1 &&
2503             subslices > min_t(u8, 4, hweight8(sseu->subslice_mask[0]) / 2)) {
2504                 GEM_BUG_ON(subslices & 1);
2505
2506                 subslice_pg = false;
2507                 slices *= 2;
2508         }
2509
2510         /*
2511          * Starting in Gen9, render power gating can leave
2512          * slice/subslice/EU in a partially enabled state. We
2513          * must make an explicit request through RPCS for full
2514          * enablement.
2515          */
2516         if (sseu->has_slice_pg) {
2517                 u32 mask, val = slices;
2518
2519                 if (INTEL_GEN(i915) >= 11) {
2520                         mask = GEN11_RPCS_S_CNT_MASK;
2521                         val <<= GEN11_RPCS_S_CNT_SHIFT;
2522                 } else {
2523                         mask = GEN8_RPCS_S_CNT_MASK;
2524                         val <<= GEN8_RPCS_S_CNT_SHIFT;
2525                 }
2526
2527                 GEM_BUG_ON(val & ~mask);
2528                 val &= mask;
2529
2530                 rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_S_CNT_ENABLE | val;
2531         }
2532
2533         if (subslice_pg) {
2534                 u32 val = subslices;
2535
2536                 val <<= GEN8_RPCS_SS_CNT_SHIFT;
2537
2538                 GEM_BUG_ON(val & ~GEN8_RPCS_SS_CNT_MASK);
2539                 val &= GEN8_RPCS_SS_CNT_MASK;
2540
2541                 rpcs |= GEN8_RPCS_ENABLE | GEN8_RPCS_SS_CNT_ENABLE | val;
2542         }
2543
2544         if (sseu->has_eu_pg) {
2545                 u32 val;
2546
2547                 val = ctx_sseu.min_eus_per_subslice << GEN8_RPCS_EU_MIN_SHIFT;
2548                 GEM_BUG_ON(val & ~GEN8_RPCS_EU_MIN_MASK);
2549                 val &= GEN8_RPCS_EU_MIN_MASK;
2550
2551                 rpcs |= val;
2552
2553                 val = ctx_sseu.max_eus_per_subslice << GEN8_RPCS_EU_MAX_SHIFT;
2554                 GEM_BUG_ON(val & ~GEN8_RPCS_EU_MAX_MASK);
2555                 val &= GEN8_RPCS_EU_MAX_MASK;
2556
2557                 rpcs |= val;
2558
2559                 rpcs |= GEN8_RPCS_ENABLE;
2560         }
2561
2562         return rpcs;
2563 }
2564
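/* Per-gen default value for the RCS indirect context offset register. */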
2565 static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
2566 {
2567         u32 indirect_ctx_offset;
2568
2569         switch (INTEL_GEN(engine->i915)) {
2570         default:
2571                 MISSING_CASE(INTEL_GEN(engine->i915));
2572                 /* fall through */
2573         case 11:
2574                 indirect_ctx_offset =
2575                         GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2576                 break;
2577         case 10:
2578                 indirect_ctx_offset =
2579                         GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2580                 break;
2581         case 9:
2582                 indirect_ctx_offset =
2583                         GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2584                 break;
2585         case 8:
2586                 indirect_ctx_offset =
2587                         GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2588                 break;
2589         }
2590
2591         return indirect_ctx_offset;
2592 }
2593
2594 static void execlists_init_reg_state(u32 *regs,
2595                                      struct i915_gem_context *ctx,
2596                                      struct intel_engine_cs *engine,
2597                                      struct intel_ring *ring)
2598 {
2599         struct drm_i915_private *dev_priv = engine->i915;
2600         u32 base = engine->mmio_base;
2601         bool rcs = engine->class == RENDER_CLASS;
2602
2603         /* A context is actually a big batch buffer with several
2604          * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
2605          * values we are setting here are only for the first context restore:
2606          * on a subsequent save, the GPU will recreate this batchbuffer with new
2607          * values (including all the missing MI_LOAD_REGISTER_IMM commands that
2608          * we are not initializing here).
2609          */
2610         regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
2611                                  MI_LRI_FORCE_POSTED;
2612
2613         CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
2614                 _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
2615                 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
2616         if (INTEL_GEN(dev_priv) < 11) {
2617                 regs[CTX_CONTEXT_CONTROL + 1] |=
2618                         _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
2619                                             CTX_CTRL_RS_CTX_ENABLE);
2620         }
2621         CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
2622         CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0);
2623         CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0);
2624         CTX_REG(regs, CTX_RING_BUFFER_CONTROL, RING_CTL(base),
2625                 RING_CTL_SIZE(ring->size) | RING_VALID);
2626         CTX_REG(regs, CTX_BB_HEAD_U, RING_BBADDR_UDW(base), 0);
2627         CTX_REG(regs, CTX_BB_HEAD_L, RING_BBADDR(base), 0);
2628         CTX_REG(regs, CTX_BB_STATE, RING_BBSTATE(base), RING_BB_PPGTT);
2629         CTX_REG(regs, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(base), 0);
2630         CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0);
2631         CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0);
2632         if (rcs) {
2633                 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
2634
2635                 CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0);
2636                 CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET,
2637                         RING_INDIRECT_CTX_OFFSET(base), 0);
2638                 if (wa_ctx->indirect_ctx.size) {
2639                         u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
2640
2641                         regs[CTX_RCS_INDIRECT_CTX + 1] =
2642                                 (ggtt_offset + wa_ctx->indirect_ctx.offset) |
2643                                 (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
2644
2645                         regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] =
2646                                 intel_lr_indirect_ctx_offset(engine) << 6;
2647                 }
2648
2649                 CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
2650                 if (wa_ctx->per_ctx.size) {
2651                         u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
2652
2653                         regs[CTX_BB_PER_CTX_PTR + 1] =
2654                                 (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
2655                 }
2656         }
2657
2658         regs[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
2659
2660         CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0);
2661         /* PDP values will be assigned later if needed */
2662         CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3), 0);
2663         CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3), 0);
2664         CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2), 0);
2665         CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2), 0);
2666         CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1), 0);
2667         CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1), 0);
2668         CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
2669         CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
2670
2671         if (i915_vm_is_48bit(&ctx->ppgtt->vm)) {
2672                 /* 64b PPGTT (48bit canonical)
2673                  * PDP0_DESCRIPTOR contains the base address to PML4 and
2674                  * other PDP Descriptors are ignored.
2675                  */
2676                 ASSIGN_CTX_PML4(ctx->ppgtt, regs);
2677         } else {
2678                 ASSIGN_CTX_PDP(ctx->ppgtt, regs, 3);
2679                 ASSIGN_CTX_PDP(ctx->ppgtt, regs, 2);
2680                 ASSIGN_CTX_PDP(ctx->ppgtt, regs, 1);
2681                 ASSIGN_CTX_PDP(ctx->ppgtt, regs, 0);
2682         }
2683
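	/*
	 * The render engine gets a third LRI block for R_PWR_CLK_STATE;
	 * i915_oa_init_reg_state() then fills in the OA/perf related
	 * register state for this context.
	 */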
2684         if (rcs) {
2685                 regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2686                 CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
2687
2688                 i915_oa_init_reg_state(engine, ctx, regs);
2689         }
2690
2691         regs[CTX_END] = MI_BATCH_BUFFER_END;
2692         if (INTEL_GEN(dev_priv) >= 10)
2693                 regs[CTX_END] |= BIT(0);
2694 }
2695
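/*
 * Fill in a freshly allocated context image: move the backing object to the
 * CPU domain and map it, copy the engine's default ("golden") context state
 * over everything past the header pages reserved for GuC use, then write the
 * register state for this context and ring.
 */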
2696 static int
2697 populate_lr_context(struct i915_gem_context *ctx,
2698                     struct drm_i915_gem_object *ctx_obj,
2699                     struct intel_engine_cs *engine,
2700                     struct intel_ring *ring)
2701 {
2702         void *vaddr;
2703         u32 *regs;
2704         int ret;
2705
2706         ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
2707         if (ret) {
2708                 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
2709                 return ret;
2710         }
2711
2712         vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
2713         if (IS_ERR(vaddr)) {
2714                 ret = PTR_ERR(vaddr);
2715                 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
2716                 return ret;
2717         }
2718         ctx_obj->mm.dirty = true;
2719
2720         if (engine->default_state) {
2721                 /*
2722                  * We only want to copy over the template context state,
2723                  * skipping the headers reserved for GuC communication
2724                  * and leaving those as zero.
2725                  */
2726                 const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE;
2727                 void *defaults;
2728
2729                 defaults = i915_gem_object_pin_map(engine->default_state,
2730                                                    I915_MAP_WB);
2731                 if (IS_ERR(defaults)) {
2732                         ret = PTR_ERR(defaults);
2733                         goto err_unpin_ctx;
2734                 }
2735
2736                 memcpy(vaddr + start, defaults + start, engine->context_size);
2737                 i915_gem_object_unpin_map(engine->default_state);
2738         }
2739
2740         /* The second page of the context object contains some fields which must
2741          * be set up prior to the first execution. */
2742         regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
2743         execlists_init_reg_state(regs, ctx, engine, ring);
2744         if (!engine->default_state)
2745                 regs[CTX_CONTEXT_CONTROL + 1] |=
2746                         _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
2747         if (ctx == ctx->i915->preempt_context && INTEL_GEN(engine->i915) < 11)
2748                 regs[CTX_CONTEXT_CONTROL + 1] |=
2749                         _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2750                                            CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
2751
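	/* Success falls through here with ret == 0; the unpin is shared. */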
2752 err_unpin_ctx:
2753         i915_gem_object_unpin_map(ctx_obj);
2754         return ret;
2755 }
2756
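/*
 * Lazily allocate the logical ring context for @ctx on @engine: the backing
 * object (including the GuC header pages), a GGTT vma, a timeline and the
 * ring itself, then populate the register state. A no-op once ce->state is
 * set.
 */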
2757 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
2758                                             struct intel_engine_cs *engine,
2759                                             struct intel_context *ce)
2760 {
2761         struct drm_i915_gem_object *ctx_obj;
2762         struct i915_vma *vma;
2763         u32 context_size;
2764         struct intel_ring *ring;
2765         struct i915_timeline *timeline;
2766         int ret;
2767
2768         if (ce->state)
2769                 return 0;
2770
2771         context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
2772
2773         /*
2774          * Before the actual start of the context image, we insert a few pages
2775          * for our own use and for sharing with the GuC.
2776          */
2777         context_size += LRC_HEADER_PAGES * PAGE_SIZE;
2778
2779         ctx_obj = i915_gem_object_create(ctx->i915, context_size);
2780         if (IS_ERR(ctx_obj))
2781                 return PTR_ERR(ctx_obj);
2782
2783         vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
2784         if (IS_ERR(vma)) {
2785                 ret = PTR_ERR(vma);
2786                 goto error_deref_obj;
2787         }
2788
2789         timeline = i915_timeline_create(ctx->i915, ctx->name, NULL);
2790         if (IS_ERR(timeline)) {
2791                 ret = PTR_ERR(timeline);
2792                 goto error_deref_obj;
2793         }
2794
2795         ring = intel_engine_create_ring(engine, timeline, ctx->ring_size);
2796         i915_timeline_put(timeline);
2797         if (IS_ERR(ring)) {
2798                 ret = PTR_ERR(ring);
2799                 goto error_deref_obj;
2800         }
2801
2802         ret = populate_lr_context(ctx, ctx_obj, engine, ring);
2803         if (ret) {
2804                 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
2805                 goto error_ring_free;
2806         }
2807
2808         ce->ring = ring;
2809         ce->state = vma;
2810
2811         return 0;
2812
2813 error_ring_free:
2814         intel_ring_free(ring);
2815 error_deref_obj:
2816         i915_gem_object_put(ctx_obj);
2817         return ret;
2818 }
2819
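/*
 * On resume, reset the ring of every logical ring context and, for contexts
 * that are still pinned, refresh their register state; see below for why the
 * stale ring contents cannot be trusted.
 */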
2820 void intel_lr_context_resume(struct drm_i915_private *i915)
2821 {
2822         struct intel_engine_cs *engine;
2823         struct i915_gem_context *ctx;
2824         enum intel_engine_id id;
2825
2826         /*
2827          * Because we emit WA_TAIL_DWORDS there may be a disparity
2828          * between our bookkeeping in ce->ring->head and ce->ring->tail and
2829          * the values stored in the context image. As we only write new
2830          * commands from ce->ring->tail onwards, everything before that is
2831          * junk. If the GPU restores RING_HEAD from the context image and
2832          * starts reading there, it may try to execute that junk and die.
2833          *
2834          * So to avoid that we reset the context images upon resume. For
2835          * simplicity, we just zero everything out.
2836          */
2837         list_for_each_entry(ctx, &i915->contexts.list, link) {
2838                 for_each_engine(engine, i915, id) {
2839                         struct intel_context *ce =
2840                                 to_intel_context(ctx, engine);
2841
2842                         if (!ce->state)
2843                                 continue;
2844
2845                         intel_ring_reset(ce->ring, 0);
2846
2847                         if (ce->pin_count) /* otherwise done in context_pin */
2848                                 __execlists_update_reg_state(engine, ce);
2849                 }
2850         }
2851 }
2852
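/*
 * Print up to @max of the executing requests and up to @max of the queued
 * requests on @engine through @show_request, noting how many were skipped
 * once the limit is reached.
 */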
2853 void intel_execlists_show_requests(struct intel_engine_cs *engine,
2854                                    struct drm_printer *m,
2855                                    void (*show_request)(struct drm_printer *m,
2856                                                         struct i915_request *rq,
2857                                                         const char *prefix),
2858                                    unsigned int max)
2859 {
2860         const struct intel_engine_execlists *execlists = &engine->execlists;
2861         struct i915_request *rq, *last;
2862         unsigned long flags;
2863         unsigned int count;
2864         struct rb_node *rb;
2865
2866         spin_lock_irqsave(&engine->timeline.lock, flags);
2867
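	/* Requests on the engine timeline have already been submitted to HW. */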
2868         last = NULL;
2869         count = 0;
2870         list_for_each_entry(rq, &engine->timeline.requests, link) {
2871                 if (count++ < max - 1)
2872                         show_request(m, rq, "\t\tE ");
2873                 else
2874                         last = rq;
2875         }
2876         if (last) {
2877                 if (count > max) {
2878                         drm_printf(m,
2879                                    "\t\t...skipping %d executing requests...\n",
2880                                    count - max);
2881                 }
2882                 show_request(m, last, "\t\tE ");
2883         }
2884
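	/* Then the priority tree of requests still waiting to be submitted. */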
2885         last = NULL;
2886         count = 0;
2887         if (execlists->queue_priority_hint != INT_MIN)
2888                 drm_printf(m, "\t\tQueue priority hint: %d\n",
2889                            execlists->queue_priority_hint);
2890         for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
2891                 struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
2892                 int i;
2893
2894                 priolist_for_each_request(rq, p, i) {
2895                         if (count++ < max - 1)
2896                                 show_request(m, rq, "\t\tQ ");
2897                         else
2898                                 last = rq;
2899                 }
2900         }
2901         if (last) {
2902                 if (count > max) {
2903                         drm_printf(m,
2904                                    "\t\t...skipping %d queued requests...\n",
2905                                    count - max);
2906                 }
2907                 show_request(m, last, "\t\tQ ");
2908         }
2909
2910         spin_unlock_irqrestore(&engine->timeline.lock, flags);
2911 }
2912
2913 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2914 #include "selftests/intel_lrc.c"
2915 #endif