drivers/gpu/drm/i915/gt/intel_ring_submission.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao<haihao.xiang@intel.com>
27  *
28  */
29
30 #include <linux/log2.h>
31
32 #include <drm/i915_drm.h>
33
34 #include "gem/i915_gem_context.h"
35
36 #include "gen6_ppgtt.h"
37 #include "i915_drv.h"
38 #include "i915_trace.h"
39 #include "intel_context.h"
40 #include "intel_gt.h"
41 #include "intel_gt_irq.h"
42 #include "intel_gt_pm_irq.h"
43 #include "intel_reset.h"
44 #include "intel_ring.h"
45 #include "intel_workarounds.h"
46
47 /* Rough estimate of the typical request size, performing a flush,
48  * set-context and then emitting the batch.
49  */
50 #define LEGACY_REQUEST_SIZE 200
51
52 static int
53 gen2_render_ring_flush(struct i915_request *rq, u32 mode)
54 {
55         unsigned int num_store_dw;
56         u32 cmd, *cs;
57
58         cmd = MI_FLUSH;
59         num_store_dw = 0;
60         if (mode & EMIT_INVALIDATE)
61                 cmd |= MI_READ_FLUSH;
62         if (mode & EMIT_FLUSH)
63                 num_store_dw = 4;
64
65         cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
66         if (IS_ERR(cs))
67                 return PTR_ERR(cs);
68
69         *cs++ = cmd;
70         while (num_store_dw--) {
71                 *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
72                 *cs++ = intel_gt_scratch_offset(rq->engine->gt,
73                                                 INTEL_GT_SCRATCH_FIELD_DEFAULT);
74                 *cs++ = 0;
75         }
76         *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
77
78         intel_ring_advance(rq, cs);
79
80         return 0;
81 }
82
83 static int
84 gen4_render_ring_flush(struct i915_request *rq, u32 mode)
85 {
86         u32 cmd, *cs;
87         int i;
88
89         /*
90          * read/write caches:
91          *
92          * I915_GEM_DOMAIN_RENDER is always invalidated, but is
93          * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
94          * also flushed at 2d versus 3d pipeline switches.
95          *
96          * read-only caches:
97          *
98          * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
99          * MI_READ_FLUSH is set, and is always flushed on 965.
100          *
101          * I915_GEM_DOMAIN_COMMAND may not exist?
102          *
103          * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
104          * invalidated when MI_EXE_FLUSH is set.
105          *
106          * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
107          * invalidated with every MI_FLUSH.
108          *
109          * TLBs:
110          *
111          * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
112          * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
113          * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
114          * are flushed at any MI_FLUSH.
115          */
116
117         cmd = MI_FLUSH;
118         if (mode & EMIT_INVALIDATE) {
119                 cmd |= MI_EXE_FLUSH;
120                 if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
121                         cmd |= MI_INVALIDATE_ISP;
122         }
123
124         i = 2;
125         if (mode & EMIT_INVALIDATE)
126                 i += 20;
127
128         cs = intel_ring_begin(rq, i);
129         if (IS_ERR(cs))
130                 return PTR_ERR(cs);
131
132         *cs++ = cmd;
133
134         /*
135          * A random delay to let the CS invalidate take effect? Without this
136          * delay, the GPU relocation path fails as the CS does not see
137          * the updated contents. Just as important, if we apply the flushes
138          * to the EMIT_FLUSH branch (i.e. immediately after the relocation
139          * write and before the invalidate on the next batch), the relocations
140          * still fail. This implies that it is a delay following invalidation
141          * that is required to reset the caches as opposed to a delay to
142          * ensure the memory is written.
143          */
144         if (mode & EMIT_INVALIDATE) {
145                 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
146                 *cs++ = intel_gt_scratch_offset(rq->engine->gt,
147                                                 INTEL_GT_SCRATCH_FIELD_DEFAULT) |
148                         PIPE_CONTROL_GLOBAL_GTT;
149                 *cs++ = 0;
150                 *cs++ = 0;
151
152                 for (i = 0; i < 12; i++)
153                         *cs++ = MI_FLUSH;
154
155                 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
156                 *cs++ = intel_gt_scratch_offset(rq->engine->gt,
157                                                 INTEL_GT_SCRATCH_FIELD_DEFAULT) |
158                         PIPE_CONTROL_GLOBAL_GTT;
159                 *cs++ = 0;
160                 *cs++ = 0;
161         }
162
163         *cs++ = cmd;
164
165         intel_ring_advance(rq, cs);
166
167         return 0;
168 }
169
170 /*
171  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
172  * implementing two workarounds on gen6.  From section 1.4.7.1
173  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
174  *
175  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
176  * produced by non-pipelined state commands), software needs to first
177  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
178  * 0.
179  *
180  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
181  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
182  *
183  * And the workaround for these two requires this workaround first:
184  *
185  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
186  * BEFORE the pipe-control with a post-sync op and no write-cache
187  * flushes.
188  *
189  * And this last workaround is tricky because of the requirements on
190  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
191  * volume 2 part 1:
192  *
193  *     "1 of the following must also be set:
194  *      - Render Target Cache Flush Enable ([12] of DW1)
195  *      - Depth Cache Flush Enable ([0] of DW1)
196  *      - Stall at Pixel Scoreboard ([1] of DW1)
197  *      - Depth Stall ([13] of DW1)
198  *      - Post-Sync Operation ([13] of DW1)
199  *      - Notify Enable ([8] of DW1)"
200  *
201  * The cache flushes require the workaround flush that triggered this
202  * one, so we can't use it.  Depth stall would trigger the same.
203  * Post-sync nonzero is what triggered this second workaround, so we
204  * can't use that one either.  Notify enable is IRQs, which aren't
205  * really our business.  That leaves only stall at scoreboard.
206  */
207 static int
208 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
209 {
210         u32 scratch_addr =
211                 intel_gt_scratch_offset(rq->engine->gt,
212                                         INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
213         u32 *cs;
214
215         cs = intel_ring_begin(rq, 6);
216         if (IS_ERR(cs))
217                 return PTR_ERR(cs);
218
219         *cs++ = GFX_OP_PIPE_CONTROL(5);
220         *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
221         *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
222         *cs++ = 0; /* low dword */
223         *cs++ = 0; /* high dword */
224         *cs++ = MI_NOOP;
225         intel_ring_advance(rq, cs);
226
227         cs = intel_ring_begin(rq, 6);
228         if (IS_ERR(cs))
229                 return PTR_ERR(cs);
230
231         *cs++ = GFX_OP_PIPE_CONTROL(5);
232         *cs++ = PIPE_CONTROL_QW_WRITE;
233         *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
234         *cs++ = 0;
235         *cs++ = 0;
236         *cs++ = MI_NOOP;
237         intel_ring_advance(rq, cs);
238
239         return 0;
240 }
241
242 static int
243 gen6_render_ring_flush(struct i915_request *rq, u32 mode)
244 {
245         u32 scratch_addr =
246                 intel_gt_scratch_offset(rq->engine->gt,
247                                         INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
248         u32 *cs, flags = 0;
249         int ret;
250
251         /* Force SNB workarounds for PIPE_CONTROL flushes */
252         ret = gen6_emit_post_sync_nonzero_flush(rq);
253         if (ret)
254                 return ret;
255
256         /* Just flush everything.  Experiments have shown that reducing the
257          * number of bits based on the write domains has little performance
258          * impact.
259          */
260         if (mode & EMIT_FLUSH) {
261                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
262                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
263                 /*
264                  * Ensure that any following seqno writes only happen
265                  * when the render cache is indeed flushed.
266                  */
267                 flags |= PIPE_CONTROL_CS_STALL;
268         }
269         if (mode & EMIT_INVALIDATE) {
270                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
271                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
272                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
273                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
274                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
275                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
276                 /*
277                  * TLB invalidate requires a post-sync write.
278                  */
279                 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
280         }
281
282         cs = intel_ring_begin(rq, 4);
283         if (IS_ERR(cs))
284                 return PTR_ERR(cs);
285
286         *cs++ = GFX_OP_PIPE_CONTROL(4);
287         *cs++ = flags;
288         *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
289         *cs++ = 0;
290         intel_ring_advance(rq, cs);
291
292         return 0;
293 }
294
295 static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
296 {
297         /* First we do the gen6_emit_post_sync_nonzero_flush w/a */
298         *cs++ = GFX_OP_PIPE_CONTROL(4);
299         *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
300         *cs++ = 0;
301         *cs++ = 0;
302
303         *cs++ = GFX_OP_PIPE_CONTROL(4);
304         *cs++ = PIPE_CONTROL_QW_WRITE;
305         *cs++ = intel_gt_scratch_offset(rq->engine->gt,
306                                         INTEL_GT_SCRATCH_FIELD_DEFAULT) |
307                 PIPE_CONTROL_GLOBAL_GTT;
308         *cs++ = 0;
309
310         /* Finally we can flush and with it emit the breadcrumb */
311         *cs++ = GFX_OP_PIPE_CONTROL(4);
312         *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
313                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
314                  PIPE_CONTROL_DC_FLUSH_ENABLE |
315                  PIPE_CONTROL_QW_WRITE |
316                  PIPE_CONTROL_CS_STALL);
317         *cs++ = i915_request_active_timeline(rq)->hwsp_offset |
318                 PIPE_CONTROL_GLOBAL_GTT;
319         *cs++ = rq->fence.seqno;
320
321         *cs++ = MI_USER_INTERRUPT;
322         *cs++ = MI_NOOP;
323
324         rq->tail = intel_ring_offset(rq, cs);
325         assert_ring_tail_valid(rq->ring, rq->tail);
326
327         return cs;
328 }
329
330 static int
331 gen7_render_ring_cs_stall_wa(struct i915_request *rq)
332 {
333         u32 *cs;
334
335         cs = intel_ring_begin(rq, 4);
336         if (IS_ERR(cs))
337                 return PTR_ERR(cs);
338
339         *cs++ = GFX_OP_PIPE_CONTROL(4);
340         *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
341         *cs++ = 0;
342         *cs++ = 0;
343         intel_ring_advance(rq, cs);
344
345         return 0;
346 }
347
348 static int
349 gen7_render_ring_flush(struct i915_request *rq, u32 mode)
350 {
351         u32 scratch_addr =
352                 intel_gt_scratch_offset(rq->engine->gt,
353                                         INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH);
354         u32 *cs, flags = 0;
355
356         /*
357          * Ensure that any following seqno writes only happen when the render
358          * cache is indeed flushed.
359          *
360          * Workaround: 4th PIPE_CONTROL command (except the ones with only
361          * read-cache invalidate bits set) must have the CS_STALL bit set. We
362          * don't try to be clever and just set it unconditionally.
363          */
364         flags |= PIPE_CONTROL_CS_STALL;
365
366         /*
367          * CS_STALL suggests at least a post-sync write.
368          */
369         flags |= PIPE_CONTROL_QW_WRITE;
370         flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
371
372         /* Just flush everything.  Experiments have shown that reducing the
373          * number of bits based on the write domains has little performance
374          * impact.
375          */
376         if (mode & EMIT_FLUSH) {
377                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
378                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
379                 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
380                 flags |= PIPE_CONTROL_FLUSH_ENABLE;
381         }
382         if (mode & EMIT_INVALIDATE) {
383                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
384                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
385                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
386                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
387                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
388                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
389                 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
390
391                 /* Workaround: we must issue a pipe_control with CS-stall bit
392                  * set before a pipe_control command that has the state cache
393                  * invalidate bit set. */
394                 gen7_render_ring_cs_stall_wa(rq);
395         }
396
397         cs = intel_ring_begin(rq, 4);
398         if (IS_ERR(cs))
399                 return PTR_ERR(cs);
400
401         *cs++ = GFX_OP_PIPE_CONTROL(4);
402         *cs++ = flags;
403         *cs++ = scratch_addr;
404         *cs++ = 0;
405         intel_ring_advance(rq, cs);
406
407         return 0;
408 }
409
410 static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
411 {
412         *cs++ = GFX_OP_PIPE_CONTROL(4);
413         *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
414                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
415                  PIPE_CONTROL_DC_FLUSH_ENABLE |
416                  PIPE_CONTROL_FLUSH_ENABLE |
417                  PIPE_CONTROL_QW_WRITE |
418                  PIPE_CONTROL_GLOBAL_GTT_IVB |
419                  PIPE_CONTROL_CS_STALL);
420         *cs++ = i915_request_active_timeline(rq)->hwsp_offset;
421         *cs++ = rq->fence.seqno;
422
423         *cs++ = MI_USER_INTERRUPT;
424         *cs++ = MI_NOOP;
425
426         rq->tail = intel_ring_offset(rq, cs);
427         assert_ring_tail_valid(rq->ring, rq->tail);
428
429         return cs;
430 }
431
432 static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
433 {
434         GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
435         GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
436
437         *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
438         *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
439         *cs++ = rq->fence.seqno;
440
441         *cs++ = MI_USER_INTERRUPT;
442
443         rq->tail = intel_ring_offset(rq, cs);
444         assert_ring_tail_valid(rq->ring, rq->tail);
445
446         return cs;
447 }
448
449 #define GEN7_XCS_WA 32
450 static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
451 {
452         int i;
453
454         GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
455         GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
456
457         *cs++ = MI_FLUSH_DW | MI_INVALIDATE_TLB |
458                 MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
459         *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT;
460         *cs++ = rq->fence.seqno;
461
462         for (i = 0; i < GEN7_XCS_WA; i++) {
463                 *cs++ = MI_STORE_DWORD_INDEX;
464                 *cs++ = I915_GEM_HWS_SEQNO_ADDR;
465                 *cs++ = rq->fence.seqno;
466         }
467
468         *cs++ = MI_FLUSH_DW;
469         *cs++ = 0;
470         *cs++ = 0;
471
472         *cs++ = MI_USER_INTERRUPT;
473         *cs++ = MI_NOOP;
474
475         rq->tail = intel_ring_offset(rq, cs);
476         assert_ring_tail_valid(rq->ring, rq->tail);
477
478         return cs;
479 }
480 #undef GEN7_XCS_WA
481
482 static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
483 {
484         /*
485          * Keep the render interrupt unmasked as this papers over
486          * lost interrupts following a reset.
487          */
488         if (engine->class == RENDER_CLASS) {
489                 if (INTEL_GEN(engine->i915) >= 6)
490                         mask &= ~BIT(0);
491                 else
492                         mask &= ~I915_USER_INTERRUPT;
493         }
494
495         intel_engine_set_hwsp_writemask(engine, mask);
496 }
497
498 static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
499 {
500         u32 addr;
501
502         addr = lower_32_bits(phys);
503         if (INTEL_GEN(engine->i915) >= 4)
504                 addr |= (phys >> 28) & 0xf0;
505
506         intel_uncore_write(engine->uncore, HWS_PGA, addr);
507 }
508
509 static struct page *status_page(struct intel_engine_cs *engine)
510 {
511         struct drm_i915_gem_object *obj = engine->status_page.vma->obj;
512
513         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
514         return sg_page(obj->mm.pages->sgl);
515 }
516
517 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
518 {
519         set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
520         set_hwstam(engine, ~0u);
521 }
522
523 static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
524 {
525         i915_reg_t hwsp;
526
527         /*
528          * The ring status page addresses are no longer next to the rest of
529          * the ring registers as of gen7.
530          */
531         if (IS_GEN(engine->i915, 7)) {
532                 switch (engine->id) {
533                 /*
534                  * No more rings exist on Gen7. Default case is only to shut up
535                  * gcc switch check warning.
536                  */
537                 default:
538                         GEM_BUG_ON(engine->id);
539                         /* fallthrough */
540                 case RCS0:
541                         hwsp = RENDER_HWS_PGA_GEN7;
542                         break;
543                 case BCS0:
544                         hwsp = BLT_HWS_PGA_GEN7;
545                         break;
546                 case VCS0:
547                         hwsp = BSD_HWS_PGA_GEN7;
548                         break;
549                 case VECS0:
550                         hwsp = VEBOX_HWS_PGA_GEN7;
551                         break;
552                 }
553         } else if (IS_GEN(engine->i915, 6)) {
554                 hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
555         } else {
556                 hwsp = RING_HWS_PGA(engine->mmio_base);
557         }
558
559         intel_uncore_write(engine->uncore, hwsp, offset);
560         intel_uncore_posting_read(engine->uncore, hwsp);
561 }
562
563 static void flush_cs_tlb(struct intel_engine_cs *engine)
564 {
565         struct drm_i915_private *dev_priv = engine->i915;
566
567         if (!IS_GEN_RANGE(dev_priv, 6, 7))
568                 return;
569
570         /* ring should be idle before issuing a sync flush */
571         WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
572
573         ENGINE_WRITE(engine, RING_INSTPM,
574                      _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
575                                         INSTPM_SYNC_FLUSH));
576         if (intel_wait_for_register(engine->uncore,
577                                     RING_INSTPM(engine->mmio_base),
578                                     INSTPM_SYNC_FLUSH, 0,
579                                     1000))
580                 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
581                           engine->name);
582 }
583
584 static void ring_setup_status_page(struct intel_engine_cs *engine)
585 {
586         set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
587         set_hwstam(engine, ~0u);
588
589         flush_cs_tlb(engine);
590 }
591
592 static bool stop_ring(struct intel_engine_cs *engine)
593 {
594         struct drm_i915_private *dev_priv = engine->i915;
595
596         if (INTEL_GEN(dev_priv) > 2) {
597                 ENGINE_WRITE(engine,
598                              RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
599                 if (intel_wait_for_register(engine->uncore,
600                                             RING_MI_MODE(engine->mmio_base),
601                                             MODE_IDLE,
602                                             MODE_IDLE,
603                                             1000)) {
604                         DRM_ERROR("%s : timed out trying to stop ring\n",
605                                   engine->name);
606
607                         /*
608                          * Sometimes we observe that the idle flag is not
609                          * set even though the ring is empty. So double
610                          * check before giving up.
611                          */
612                         if (ENGINE_READ(engine, RING_HEAD) !=
613                             ENGINE_READ(engine, RING_TAIL))
614                                 return false;
615                 }
616         }
617
618         ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));
619
620         ENGINE_WRITE(engine, RING_HEAD, 0);
621         ENGINE_WRITE(engine, RING_TAIL, 0);
622
623         /* The ring must be empty before it is disabled */
624         ENGINE_WRITE(engine, RING_CTL, 0);
625
626         return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
627 }
628
629 static int xcs_resume(struct intel_engine_cs *engine)
630 {
631         struct drm_i915_private *dev_priv = engine->i915;
632         struct intel_ring *ring = engine->legacy.ring;
633         int ret = 0;
634
635         ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
636                      ring->head, ring->tail);
637
638         intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
639
640         /* WaClearRingBufHeadRegAtInit:ctg,elk */
641         if (!stop_ring(engine)) {
642                 /* G45 ring initialization often fails to reset head to zero */
643                 DRM_DEBUG_DRIVER("%s head not reset to zero "
644                                 "ctl %08x head %08x tail %08x start %08x\n",
645                                 engine->name,
646                                 ENGINE_READ(engine, RING_CTL),
647                                 ENGINE_READ(engine, RING_HEAD),
648                                 ENGINE_READ(engine, RING_TAIL),
649                                 ENGINE_READ(engine, RING_START));
650
651                 if (!stop_ring(engine)) {
652                         DRM_ERROR("failed to set %s head to zero "
653                                   "ctl %08x head %08x tail %08x start %08x\n",
654                                   engine->name,
655                                   ENGINE_READ(engine, RING_CTL),
656                                   ENGINE_READ(engine, RING_HEAD),
657                                   ENGINE_READ(engine, RING_TAIL),
658                                   ENGINE_READ(engine, RING_START));
659                         ret = -EIO;
660                         goto out;
661                 }
662         }
663
664         if (HWS_NEEDS_PHYSICAL(dev_priv))
665                 ring_setup_phys_status_page(engine);
666         else
667                 ring_setup_status_page(engine);
668
669         intel_engine_reset_breadcrumbs(engine);
670
671         /* Enforce ordering by reading HEAD register back */
672         ENGINE_POSTING_READ(engine, RING_HEAD);
673
674         /*
675          * Initialize the ring. This must happen _after_ we've cleared the ring
676          * registers with the above sequence (the readback of the HEAD registers
677          * also enforces ordering), otherwise the hw might lose the new ring
678          * register values.
679          */
680         ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
681
682         /* Check that the ring offsets point within the ring! */
683         GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
684         GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
685         intel_ring_update_space(ring);
686
687         /* First wake the ring up to an empty/idle ring */
688         ENGINE_WRITE(engine, RING_HEAD, ring->head);
689         ENGINE_WRITE(engine, RING_TAIL, ring->head);
690         ENGINE_POSTING_READ(engine, RING_TAIL);
691
692         ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);
693
694         /* If the ring still does not report as valid, it is dead */
695         if (intel_wait_for_register(engine->uncore,
696                                     RING_CTL(engine->mmio_base),
697                                     RING_VALID, RING_VALID,
698                                     50)) {
699                 DRM_ERROR("%s initialization failed "
700                           "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
701                           engine->name,
702                           ENGINE_READ(engine, RING_CTL),
703                           ENGINE_READ(engine, RING_CTL) & RING_VALID,
704                           ENGINE_READ(engine, RING_HEAD), ring->head,
705                           ENGINE_READ(engine, RING_TAIL), ring->tail,
706                           ENGINE_READ(engine, RING_START),
707                           i915_ggtt_offset(ring->vma));
708                 ret = -EIO;
709                 goto out;
710         }
711
712         if (INTEL_GEN(dev_priv) > 2)
713                 ENGINE_WRITE(engine,
714                              RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
715
716         /* Now awake, let it get started */
717         if (ring->tail != ring->head) {
718                 ENGINE_WRITE(engine, RING_TAIL, ring->tail);
719                 ENGINE_POSTING_READ(engine, RING_TAIL);
720         }
721
722         /* Papering over lost _interrupts_ immediately following the restart */
723         intel_engine_signal_breadcrumbs(engine);
724 out:
725         intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
726
727         return ret;
728 }
729
730 static void reset_prepare(struct intel_engine_cs *engine)
731 {
732         struct intel_uncore *uncore = engine->uncore;
733         const u32 base = engine->mmio_base;
734
735         /*
736          * We stop engines, otherwise we might get failed reset and a
737          * dead gpu (on elk). Also a gpu as modern as kbl can suffer
738          * from system hang if batchbuffer is progressing when
739          * the reset is issued, regardless of READY_TO_RESET ack.
740          * Thus assume it is best to stop engines on all gens
741          * where we have a gpu reset.
742          *
743          * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
744          *
745          * WaMediaResetMainRingCleanup:ctg,elk (presumably)
746          *
747          * FIXME: Wa for more modern gens needs to be validated
748          */
749         ENGINE_TRACE(engine, "\n");
750
751         if (intel_engine_stop_cs(engine))
752                 ENGINE_TRACE(engine, "timed out on STOP_RING\n");
753
754         intel_uncore_write_fw(uncore,
755                               RING_HEAD(base),
756                               intel_uncore_read_fw(uncore, RING_TAIL(base)));
757         intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
758
759         intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
760         intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
761         intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
762
763         /* The ring must be empty before it is disabled */
764         intel_uncore_write_fw(uncore, RING_CTL(base), 0);
765
766         /* Check acts as a post */
767         if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
768                 ENGINE_TRACE(engine, "ring head [%x] not parked\n",
769                              intel_uncore_read_fw(uncore, RING_HEAD(base)));
770 }
771
772 static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
773 {
774         struct i915_request *pos, *rq;
775         unsigned long flags;
776         u32 head;
777
778         rq = NULL;
779         spin_lock_irqsave(&engine->active.lock, flags);
780         list_for_each_entry(pos, &engine->active.requests, sched.link) {
781                 if (!i915_request_completed(pos)) {
782                         rq = pos;
783                         break;
784                 }
785         }
786
787         /*
788          * The guilty request will get skipped on a hung engine.
789          *
790          * Users of client default contexts do not rely on logical
791          * state preserved between batches so it is safe to execute
792          * queued requests following the hang. Non default contexts
793          * rely on preserved state, so skipping a batch loses the
794          * evolution of the state and it needs to be considered corrupted.
795          * Executing more queued batches on top of corrupted state is
796          * risky. But we take the risk by trying to advance through
797          * the queued requests in order to make the client behaviour
798          * more predictable around resets, by not throwing away random
799          * more predictable around resets, by not throwing away a random
800          * number of batches it has prepared for execution. Sophisticated
801          * (exported via sync_file info ioctl on explicit fences) to observe
802          * when it loses the context state and should rebuild accordingly.
803          *
804          * The context ban, and ultimately the client ban, mechanism are safety
805          * valves if client submission ends up resulting in nothing more than
806          * subsequent hangs.
807          */
808
809         if (rq) {
810                 /*
811                  * Try to restore the logical GPU state to match the
812                  * continuation of the request queue. If we skip the
813                  * context/PD restore, then the next request may try to execute
814                  * assuming that its context is valid and loaded on the GPU and
815                  * so may try to access invalid memory, prompting repeated GPU
816                  * hangs.
817                  *
818                  * If the request was guilty, we still restore the logical
819                  * state in case the next request requires it (e.g. the
820                  * aliasing ppgtt), but skip over the hung batch.
821                  *
822                  * If the request was innocent, we try to replay the request
823                  * with the restored context.
824                  */
825                 __i915_request_reset(rq, stalled);
826
827                 GEM_BUG_ON(rq->ring != engine->legacy.ring);
828                 head = rq->head;
829         } else {
830                 head = engine->legacy.ring->tail;
831         }
832         engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
833
834         spin_unlock_irqrestore(&engine->active.lock, flags);
835 }
836
837 static void reset_finish(struct intel_engine_cs *engine)
838 {
839 }
840
841 static int rcs_resume(struct intel_engine_cs *engine)
842 {
843         struct drm_i915_private *i915 = engine->i915;
844         struct intel_uncore *uncore = engine->uncore;
845
846         /*
847          * Disable CONSTANT_BUFFER before it is loaded from the context
848          * image. For as soon as it is loaded, it is executed and the stored
849          * address may no longer be valid, leading to a GPU hang.
850          *
851          * This imposes the requirement that userspace reload their
852          * CONSTANT_BUFFER on every batch, fortunately a requirement
853          * they are already accustomed to from before contexts were
854          * enabled.
855          */
856         if (IS_GEN(i915, 4))
857                 intel_uncore_write(uncore, ECOSKPD,
858                            _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
859
860         /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
861         if (IS_GEN_RANGE(i915, 4, 6))
862                 intel_uncore_write(uncore, MI_MODE,
863                                    _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
864
865         /* We need to disable the AsyncFlip performance optimisations in order
866          * to use MI_WAIT_FOR_EVENT within the CS. It should already be
867          * programmed to '1' on all products.
868          *
869          * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
870          */
871         if (IS_GEN_RANGE(i915, 6, 7))
872                 intel_uncore_write(uncore, MI_MODE,
873                                    _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
874
875         /* Required for the hardware to program scanline values for waiting */
876         /* WaEnableFlushTlbInvalidationMode:snb */
877         if (IS_GEN(i915, 6))
878                 intel_uncore_write(uncore, GFX_MODE,
879                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
880
881         /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
882         if (IS_GEN(i915, 7))
883                 intel_uncore_write(uncore, GFX_MODE_GEN7,
884                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
885                            _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
886
887         if (IS_GEN(i915, 6)) {
888                 /* From the Sandybridge PRM, volume 1 part 3, page 24:
889                  * "If this bit is set, STCunit will have LRA as replacement
890                  *  policy. [...] This bit must be reset.  LRA replacement
891                  *  policy is not supported."
892                  */
893                 intel_uncore_write(uncore, CACHE_MODE_0,
894                            _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
895         }
896
897         if (IS_GEN_RANGE(i915, 6, 7))
898                 intel_uncore_write(uncore, INSTPM,
899                                    _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
900
901         return xcs_resume(engine);
902 }
903
904 static void reset_cancel(struct intel_engine_cs *engine)
905 {
906         struct i915_request *request;
907         unsigned long flags;
908
909         spin_lock_irqsave(&engine->active.lock, flags);
910
911         /* Mark all submitted requests as skipped. */
912         list_for_each_entry(request, &engine->active.requests, sched.link) {
913                 if (!i915_request_signaled(request))
914                         dma_fence_set_error(&request->fence, -EIO);
915
916                 i915_request_mark_complete(request);
917         }
918
919         /* Remaining _unready_ requests will be nop'ed when submitted */
920
921         spin_unlock_irqrestore(&engine->active.lock, flags);
922 }
923
924 static void i9xx_submit_request(struct i915_request *request)
925 {
926         i915_request_submit(request);
927         wmb(); /* paranoid flush writes out of the WCB before mmio */
928
929         ENGINE_WRITE(request->engine, RING_TAIL,
930                      intel_ring_set_tail(request->ring, request->tail));
931 }
932
933 static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
934 {
935         GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
936         GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
937
938         *cs++ = MI_FLUSH;
939
940         *cs++ = MI_STORE_DWORD_INDEX;
941         *cs++ = I915_GEM_HWS_SEQNO_ADDR;
942         *cs++ = rq->fence.seqno;
943
944         *cs++ = MI_USER_INTERRUPT;
945         *cs++ = MI_NOOP;
946
947         rq->tail = intel_ring_offset(rq, cs);
948         assert_ring_tail_valid(rq->ring, rq->tail);
949
950         return cs;
951 }
952
953 #define GEN5_WA_STORES 8 /* must be at least 1! */
954 static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
955 {
956         int i;
957
958         GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma);
959         GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR);
960
961         *cs++ = MI_FLUSH;
962
963         BUILD_BUG_ON(GEN5_WA_STORES < 1);
964         for (i = 0; i < GEN5_WA_STORES; i++) {
965                 *cs++ = MI_STORE_DWORD_INDEX;
966                 *cs++ = I915_GEM_HWS_SEQNO_ADDR;
967                 *cs++ = rq->fence.seqno;
968         }
969
970         *cs++ = MI_USER_INTERRUPT;
971
972         rq->tail = intel_ring_offset(rq, cs);
973         assert_ring_tail_valid(rq->ring, rq->tail);
974
975         return cs;
976 }
977 #undef GEN5_WA_STORES
978
979 static void
980 gen5_irq_enable(struct intel_engine_cs *engine)
981 {
982         gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
983 }
984
985 static void
986 gen5_irq_disable(struct intel_engine_cs *engine)
987 {
988         gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
989 }
990
991 static void
992 i9xx_irq_enable(struct intel_engine_cs *engine)
993 {
994         engine->i915->irq_mask &= ~engine->irq_enable_mask;
995         intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
996         intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
997 }
998
999 static void
1000 i9xx_irq_disable(struct intel_engine_cs *engine)
1001 {
1002         engine->i915->irq_mask |= engine->irq_enable_mask;
1003         intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
1004 }
1005
1006 static void
1007 i8xx_irq_enable(struct intel_engine_cs *engine)
1008 {
1009         struct drm_i915_private *i915 = engine->i915;
1010
1011         i915->irq_mask &= ~engine->irq_enable_mask;
1012         intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
1013         ENGINE_POSTING_READ16(engine, RING_IMR);
1014 }
1015
1016 static void
1017 i8xx_irq_disable(struct intel_engine_cs *engine)
1018 {
1019         struct drm_i915_private *i915 = engine->i915;
1020
1021         i915->irq_mask |= engine->irq_enable_mask;
1022         intel_uncore_write16(&i915->uncore, GEN2_IMR, i915->irq_mask);
1023 }
1024
1025 static int
1026 bsd_ring_flush(struct i915_request *rq, u32 mode)
1027 {
1028         u32 *cs;
1029
1030         cs = intel_ring_begin(rq, 2);
1031         if (IS_ERR(cs))
1032                 return PTR_ERR(cs);
1033
1034         *cs++ = MI_FLUSH;
1035         *cs++ = MI_NOOP;
1036         intel_ring_advance(rq, cs);
1037         return 0;
1038 }
1039
1040 static void
1041 gen6_irq_enable(struct intel_engine_cs *engine)
1042 {
1043         ENGINE_WRITE(engine, RING_IMR,
1044                      ~(engine->irq_enable_mask | engine->irq_keep_mask));
1045
1046         /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
1047         ENGINE_POSTING_READ(engine, RING_IMR);
1048
1049         gen5_gt_enable_irq(engine->gt, engine->irq_enable_mask);
1050 }
1051
1052 static void
1053 gen6_irq_disable(struct intel_engine_cs *engine)
1054 {
1055         ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
1056         gen5_gt_disable_irq(engine->gt, engine->irq_enable_mask);
1057 }
1058
1059 static void
1060 hsw_vebox_irq_enable(struct intel_engine_cs *engine)
1061 {
1062         ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
1063
1064         /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
1065         ENGINE_POSTING_READ(engine, RING_IMR);
1066
1067         gen6_gt_pm_unmask_irq(engine->gt, engine->irq_enable_mask);
1068 }
1069
1070 static void
1071 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
1072 {
1073         ENGINE_WRITE(engine, RING_IMR, ~0);
1074         gen6_gt_pm_mask_irq(engine->gt, engine->irq_enable_mask);
1075 }
1076
1077 static int
1078 i965_emit_bb_start(struct i915_request *rq,
1079                    u64 offset, u32 length,
1080                    unsigned int dispatch_flags)
1081 {
1082         u32 *cs;
1083
1084         cs = intel_ring_begin(rq, 2);
1085         if (IS_ERR(cs))
1086                 return PTR_ERR(cs);
1087
1088         *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
1089                 I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
1090         *cs++ = offset;
1091         intel_ring_advance(rq, cs);
1092
1093         return 0;
1094 }
1095
1096 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1097 #define I830_BATCH_LIMIT SZ_256K
1098 #define I830_TLB_ENTRIES (2)
1099 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
1100 static int
1101 i830_emit_bb_start(struct i915_request *rq,
1102                    u64 offset, u32 len,
1103                    unsigned int dispatch_flags)
1104 {
1105         u32 *cs, cs_offset =
1106                 intel_gt_scratch_offset(rq->engine->gt,
1107                                         INTEL_GT_SCRATCH_FIELD_DEFAULT);
1108
1109         GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
1110
1111         cs = intel_ring_begin(rq, 6);
1112         if (IS_ERR(cs))
1113                 return PTR_ERR(cs);
1114
1115         /* Evict the invalid PTE TLBs */
1116         *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
1117         *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
1118         *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
1119         *cs++ = cs_offset;
1120         *cs++ = 0xdeadbeef;
1121         *cs++ = MI_NOOP;
1122         intel_ring_advance(rq, cs);
1123
1124         if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
1125                 if (len > I830_BATCH_LIMIT)
1126                         return -ENOSPC;
1127
1128                 cs = intel_ring_begin(rq, 6 + 2);
1129                 if (IS_ERR(cs))
1130                         return PTR_ERR(cs);
1131
1132                 /* Blit the batch (which has now all relocs applied) to the
1133                  * stable batch scratch bo area (so that the CS never
1134                  * stumbles over its tlb invalidation bug) ...
1135                  */
1136                 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
1137                 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
1138                 *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
1139                 *cs++ = cs_offset;
1140                 *cs++ = 4096;
1141                 *cs++ = offset;
1142
1143                 *cs++ = MI_FLUSH;
1144                 *cs++ = MI_NOOP;
1145                 intel_ring_advance(rq, cs);
1146
1147                 /* ... and execute it. */
1148                 offset = cs_offset;
1149         }
1150
1151         cs = intel_ring_begin(rq, 2);
1152         if (IS_ERR(cs))
1153                 return PTR_ERR(cs);
1154
1155         *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1156         *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1157                 MI_BATCH_NON_SECURE);
1158         intel_ring_advance(rq, cs);
1159
1160         return 0;
1161 }
1162
1163 static int
1164 i915_emit_bb_start(struct i915_request *rq,
1165                    u64 offset, u32 len,
1166                    unsigned int dispatch_flags)
1167 {
1168         u32 *cs;
1169
1170         cs = intel_ring_begin(rq, 2);
1171         if (IS_ERR(cs))
1172                 return PTR_ERR(cs);
1173
1174         *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1175         *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1176                 MI_BATCH_NON_SECURE);
1177         intel_ring_advance(rq, cs);
1178
1179         return 0;
1180 }
1181
1182 static void __ring_context_fini(struct intel_context *ce)
1183 {
1184         i915_vma_put(ce->state);
1185 }
1186
1187 static void ring_context_destroy(struct kref *ref)
1188 {
1189         struct intel_context *ce = container_of(ref, typeof(*ce), ref);
1190
1191         GEM_BUG_ON(intel_context_is_pinned(ce));
1192
1193         if (ce->state)
1194                 __ring_context_fini(ce);
1195
1196         intel_context_fini(ce);
1197         intel_context_free(ce);
1198 }
1199
1200 static struct i915_address_space *vm_alias(struct intel_context *ce)
1201 {
1202         struct i915_address_space *vm;
1203
1204         vm = ce->vm;
1205         if (i915_is_ggtt(vm))
1206                 vm = &i915_vm_to_ggtt(vm)->alias->vm;
1207
1208         return vm;
1209 }
1210
1211 static int __context_pin_ppgtt(struct intel_context *ce)
1212 {
1213         struct i915_address_space *vm;
1214         int err = 0;
1215
1216         vm = vm_alias(ce);
1217         if (vm)
1218                 err = gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
1219
1220         return err;
1221 }
1222
1223 static void __context_unpin_ppgtt(struct intel_context *ce)
1224 {
1225         struct i915_address_space *vm;
1226
1227         vm = vm_alias(ce);
1228         if (vm)
1229                 gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
1230 }
1231
1232 static void ring_context_unpin(struct intel_context *ce)
1233 {
1234         __context_unpin_ppgtt(ce);
1235 }
1236
1237 static struct i915_vma *
1238 alloc_context_vma(struct intel_engine_cs *engine)
1239 {
1240         struct drm_i915_private *i915 = engine->i915;
1241         struct drm_i915_gem_object *obj;
1242         struct i915_vma *vma;
1243         int err;
1244
1245         obj = i915_gem_object_create_shmem(i915, engine->context_size);
1246         if (IS_ERR(obj))
1247                 return ERR_CAST(obj);
1248
1249         /*
1250          * Try to make the context utilize L3 as well as LLC.
1251          *
1252          * On VLV we don't have L3 controls in the PTEs so we
1253          * shouldn't touch the cache level, especially as that
1254          * would make the object snooped which might have a
1255          * negative performance impact.
1256          *
1257          * Snooping is required on non-llc platforms in execlist
1258          * mode, but since all GGTT accesses use PAT entry 0 we
1259          * get snooping anyway regardless of cache_level.
1260          *
1261          * This is only applicable for Ivy Bridge devices since
1262          * later platforms don't have L3 control bits in the PTE.
1263          */
1264         if (IS_IVYBRIDGE(i915))
1265                 i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
1266
1267         if (engine->default_state) {
1268                 void *defaults, *vaddr;
1269
1270                 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
1271                 if (IS_ERR(vaddr)) {
1272                         err = PTR_ERR(vaddr);
1273                         goto err_obj;
1274                 }
1275
1276                 defaults = i915_gem_object_pin_map(engine->default_state,
1277                                                    I915_MAP_WB);
1278                 if (IS_ERR(defaults)) {
1279                         err = PTR_ERR(defaults);
1280                         goto err_map;
1281                 }
1282
1283                 memcpy(vaddr, defaults, engine->context_size);
1284                 i915_gem_object_unpin_map(engine->default_state);
1285
1286                 i915_gem_object_flush_map(obj);
1287                 i915_gem_object_unpin_map(obj);
1288         }
1289
1290         vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
1291         if (IS_ERR(vma)) {
1292                 err = PTR_ERR(vma);
1293                 goto err_obj;
1294         }
1295
1296         return vma;
1297
1298 err_map:
1299         i915_gem_object_unpin_map(obj);
1300 err_obj:
1301         i915_gem_object_put(obj);
1302         return ERR_PTR(err);
1303 }
1304
1305 static int ring_context_alloc(struct intel_context *ce)
1306 {
1307         struct intel_engine_cs *engine = ce->engine;
1308
1309         /* One ringbuffer to rule them all */
1310         GEM_BUG_ON(!engine->legacy.ring);
1311         ce->ring = engine->legacy.ring;
1312         ce->timeline = intel_timeline_get(engine->legacy.timeline);
1313
1314         GEM_BUG_ON(ce->state);
1315         if (engine->context_size) {
1316                 struct i915_vma *vma;
1317
1318                 vma = alloc_context_vma(engine);
1319                 if (IS_ERR(vma))
1320                         return PTR_ERR(vma);
1321
1322                 ce->state = vma;
1323                 if (engine->default_state)
1324                         __set_bit(CONTEXT_VALID_BIT, &ce->flags);
1325         }
1326
1327         return 0;
1328 }
1329
1330 static int ring_context_pin(struct intel_context *ce)
1331 {
1332         int err;
1333
1334         err = intel_context_active_acquire(ce);
1335         if (err)
1336                 return err;
1337
1338         err = __context_pin_ppgtt(ce);
1339         if (err)
1340                 goto err_active;
1341
1342         return 0;
1343
1344 err_active:
1345         intel_context_active_release(ce);
1346         return err;
1347 }
1348
1349 static void ring_context_reset(struct intel_context *ce)
1350 {
1351         intel_ring_reset(ce->ring, ce->ring->emit);
1352 }
1353
1354 static const struct intel_context_ops ring_context_ops = {
1355         .alloc = ring_context_alloc,
1356
1357         .pin = ring_context_pin,
1358         .unpin = ring_context_unpin,
1359
1360         .enter = intel_context_enter_engine,
1361         .exit = intel_context_exit_engine,
1362
1363         .reset = ring_context_reset,
1364         .destroy = ring_context_destroy,
1365 };
1366
1367 static int load_pd_dir(struct i915_request *rq,
1368                        const struct i915_ppgtt *ppgtt,
1369                        u32 valid)
1370 {
1371         const struct intel_engine_cs * const engine = rq->engine;
1372         u32 *cs;
1373
1374         cs = intel_ring_begin(rq, 12);
1375         if (IS_ERR(cs))
1376                 return PTR_ERR(cs);
1377
1378         *cs++ = MI_LOAD_REGISTER_IMM(1);
1379         *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
1380         *cs++ = valid;
1381
1382         *cs++ = MI_LOAD_REGISTER_IMM(1);
1383         *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
1384         *cs++ = px_base(ppgtt->pd)->ggtt_offset << 10;
1385
1386         /* Stall until the page table load is complete? */
1387         *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1388         *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
1389         *cs++ = intel_gt_scratch_offset(engine->gt,
1390                                         INTEL_GT_SCRATCH_FIELD_DEFAULT);
1391
1392         *cs++ = MI_LOAD_REGISTER_IMM(1);
1393         *cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
1394         *cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);
1395
1396         intel_ring_advance(rq, cs);
1397
1398         return rq->engine->emit_flush(rq, EMIT_FLUSH);
1399 }
1400
1401 static inline int mi_set_context(struct i915_request *rq, u32 flags)
1402 {
1403         struct drm_i915_private *i915 = rq->i915;
1404         struct intel_engine_cs *engine = rq->engine;
1405         enum intel_engine_id id;
1406         const int num_engines =
1407                 IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
1408         bool force_restore = false;
1409         int len;
1410         u32 *cs;
1411
1412         len = 4;
1413         if (IS_GEN(i915, 7))
1414                 len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
1415         else if (IS_GEN(i915, 5))
1416                 len += 2;
1417         if (flags & MI_FORCE_RESTORE) {
1418                 GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
1419                 flags &= ~MI_FORCE_RESTORE;
1420                 force_restore = true;
1421                 len += 2;
1422         }
1423
1424         cs = intel_ring_begin(rq, len);
1425         if (IS_ERR(cs))
1426                 return PTR_ERR(cs);
1427
1428         /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
1429         if (IS_GEN(i915, 7)) {
1430                 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1431                 if (num_engines) {
1432                         struct intel_engine_cs *signaller;
1433
1434                         *cs++ = MI_LOAD_REGISTER_IMM(num_engines);
1435                         for_each_engine(signaller, engine->gt, id) {
1436                                 if (signaller == engine)
1437                                         continue;
1438
1439                                 *cs++ = i915_mmio_reg_offset(
1440                                            RING_PSMI_CTL(signaller->mmio_base));
1441                                 *cs++ = _MASKED_BIT_ENABLE(
1442                                                 GEN6_PSMI_SLEEP_MSG_DISABLE);
1443                         }
1444                 }
1445         } else if (IS_GEN(i915, 5)) {
1446                 /*
1447                  * This w/a is only listed for pre-production ilk a/b steppings,
1448                  * but is also mentioned for programming the powerctx. To be
1449                  * safe, just apply the workaround; we do not use SyncFlush so
1450                  * this should never take effect and so be a no-op!
1451                  */
1452                 *cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
1453         }
1454
1455         if (force_restore) {
1456                 /*
1457                  * The HW doesn't handle being told to restore the current
1458                  * context very well. Quite often it likes to go off and
1459                  * sulk, especially when it is meant to be reloading PP_DIR.
1460                  * A very simple fix to force the reload is to simply switch
1461                  * away from the current context and back again.
1462                  *
1463                  * Note that the kernel_context will contain random state
1464                  * following the INHIBIT_RESTORE. We accept this since we
1465                  * never use the kernel_context state; it is merely a
1466                  * placeholder we use to flush other contexts.
1467                  */
1468                 *cs++ = MI_SET_CONTEXT;
1469                 *cs++ = i915_ggtt_offset(engine->kernel_context->state) |
1470                         MI_MM_SPACE_GTT |
1471                         MI_RESTORE_INHIBIT;
1472         }
1473
1474         *cs++ = MI_NOOP;
1475         *cs++ = MI_SET_CONTEXT;
1476         *cs++ = i915_ggtt_offset(rq->context->state) | flags;
1477         /*
1478          * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
1479          * WaMiSetContext_Hang:snb,ivb,vlv
1480          */
1481         *cs++ = MI_NOOP;
1482
1483         if (IS_GEN(i915, 7)) {
1484                 if (num_engines) {
1485                         struct intel_engine_cs *signaller;
1486                         i915_reg_t last_reg = {}; /* keep gcc quiet */
1487
1488                         *cs++ = MI_LOAD_REGISTER_IMM(num_engines);
1489                         for_each_engine(signaller, engine->gt, id) {
1490                                 if (signaller == engine)
1491                                         continue;
1492
1493                                 last_reg = RING_PSMI_CTL(signaller->mmio_base);
1494                                 *cs++ = i915_mmio_reg_offset(last_reg);
1495                                 *cs++ = _MASKED_BIT_DISABLE(
1496                                                 GEN6_PSMI_SLEEP_MSG_DISABLE);
1497                         }
1498
1499                         /* Insert a delay before the next switch! */
1500                         *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1501                         *cs++ = i915_mmio_reg_offset(last_reg);
1502                         *cs++ = intel_gt_scratch_offset(engine->gt,
1503                                                         INTEL_GT_SCRATCH_FIELD_DEFAULT);
1504                         *cs++ = MI_NOOP;
1505                 }
1506                 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1507         } else if (IS_GEN(i915, 5)) {
1508                 *cs++ = MI_SUSPEND_FLUSH;
1509         }
1510
1511         intel_ring_advance(rq, cs);
1512
1513         return 0;
1514 }
1515
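/*
 * Re-emit the saved L3 remapping values for a single slice as one
 * MI_LOAD_REGISTER_IMM block; a no-op if no remap information has been
 * recorded for this device.
 */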
1516 static int remap_l3_slice(struct i915_request *rq, int slice)
1517 {
1518         u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
1519         int i;
1520
1521         if (!remap_info)
1522                 return 0;
1523
1524         cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
1525         if (IS_ERR(cs))
1526                 return PTR_ERR(cs);
1527
1528         /*
1529          * Note: We do not worry about the concurrent register cacheline hang
1530          * here because nothing else should be accessing these registers
1531          * outside of initialization time.
1532          */
1533         *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
1534         for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
1535                 *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
1536                 *cs++ = remap_info[i];
1537         }
1538         *cs++ = MI_NOOP;
1539         intel_ring_advance(rq, cs);
1540
1541         return 0;
1542 }
1543
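/*
 * Apply any outstanding L3 remapping for the request's GEM context, slice
 * by slice, and clear the pending mask once everything has been emitted.
 */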
1544 static int remap_l3(struct i915_request *rq)
1545 {
1546         struct i915_gem_context *ctx = i915_request_gem_context(rq);
1547         int i, err;
1548
1549         if (!ctx || !ctx->remap_slice)
1550                 return 0;
1551
1552         for (i = 0; i < MAX_L3_SLICES; i++) {
1553                 if (!(ctx->remap_slice & BIT(i)))
1554                         continue;
1555
1556                 err = remap_l3_slice(rq, i);
1557                 if (err)
1558                         return err;
1559         }
1560
1561         ctx->remap_slice = 0;
1562         return 0;
1563 }
1564
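/*
 * Switch the ring onto a new ppGTT, if there is one to load: flush
 * outstanding writes, reload the page-directory registers and then
 * invalidate the TLBs (see the note on the extra barrier below).
 */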
1565 static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
1566 {
1567         int ret;
1568
1569         if (!vm)
1570                 return 0;
1571
1572         ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
1573         if (ret)
1574                 return ret;
1575
1576         /*
1577          * Not only do we need a full barrier (post-sync write) after
1578          * invalidating the TLBs, but we need to wait a little bit
1579          * longer. Whether this merely delays us, or whether the
1580          * subsequent flush is a key part of serialising with the
1581          * post-sync op, this extra pass appears vital before an
1582          * mm switch!
1583          */
1584         ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm), PP_DIR_DCLV_2G);
1585         if (ret)
1586                 return ret;
1587
1588         return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
1589 }
1590
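/*
 * Legacy (ring buffer) context switch: change the address space, issue
 * MI_SET_CONTEXT on the render engine when the context carries logical
 * state, then reapply any pending L3 remapping.
 */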
1591 static int switch_context(struct i915_request *rq)
1592 {
1593         struct intel_context *ce = rq->context;
1594         int ret;
1595
1596         GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
1597
1598         ret = switch_mm(rq, vm_alias(ce));
1599         if (ret)
1600                 return ret;
1601
1602         if (ce->state) {
1603                 u32 flags;
1604
1605                 GEM_BUG_ON(rq->engine->id != RCS0);
1606
1607                 /* For resource streamer on HSW+ and power context elsewhere */
1608                 BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
1609                 BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);
1610
1611                 flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
1612                 if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
1613                         flags |= MI_RESTORE_EXT_STATE_EN;
1614                 else
1615                         flags |= MI_RESTORE_INHIBIT;
1616
1617                 ret = mi_set_context(rq, flags);
1618                 if (ret)
1619                         return ret;
1620         }
1621
1622         ret = remap_l3(rq);
1623         if (ret)
1624                 return ret;
1625
1626         return 0;
1627 }
1628
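/*
 * engine->request_alloc for ring submission: reserve space for the final
 * breadcrumb, then invalidate caches and emit the context switch before
 * any user payload is added to the request.
 */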
1629 static int ring_request_alloc(struct i915_request *request)
1630 {
1631         int ret;
1632
1633         GEM_BUG_ON(!intel_context_is_pinned(request->context));
1634         GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);
1635
1636         /*
1637          * Flush enough space to reduce the likelihood of waiting after
1638          * we start building the request - in which case we will just
1639          * have to repeat work.
1640          */
1641         request->reserved_space += LEGACY_REQUEST_SIZE;
1642
1643         /* Unconditionally invalidate GPU caches and TLBs. */
1644         ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
1645         if (ret)
1646                 return ret;
1647
1648         ret = switch_context(request);
1649         if (ret)
1650                 return ret;
1651
1652         request->reserved_space -= LEGACY_REQUEST_SIZE;
1653         return 0;
1654 }
1655
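/*
 * gen6 VCS tail-update workaround: keep the ring awake by masking its
 * PSMI sleep message, wait for it to report non-idle, write the new tail
 * and only then allow it to sleep again.
 */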
1656 static void gen6_bsd_submit_request(struct i915_request *request)
1657 {
1658         struct intel_uncore *uncore = request->engine->uncore;
1659
1660         intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
1661
1662         /* Every tail move must follow the sequence below */
1663
1664         /* Disable notification that the ring is IDLE. The GT
1665          * will then assume that it is busy and bring it out of rc6.
1666          */
1667         intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
1668                               _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1669
1670         /* Clear the context id. Here be magic! */
1671         intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
1672
1673         /* Wait for the ring not to be idle, i.e. for it to wake up. */
1674         if (__intel_wait_for_register_fw(uncore,
1675                                          GEN6_BSD_SLEEP_PSMI_CONTROL,
1676                                          GEN6_BSD_SLEEP_INDICATOR,
1677                                          0,
1678                                          1000, 0, NULL))
1679                 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
1680
1681         /* Now that the ring is fully powered up, update the tail */
1682         i9xx_submit_request(request);
1683
1684         /* Let the ring send IDLE messages to the GT again,
1685          * and so let it sleep to conserve power when idle.
1686          */
1687         intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
1688                               _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1689
1690         intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
1691 }
1692
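/*
 * Emit MI_FLUSH_DW with a post-sync dword write into the hardware status
 * page scratch slot; callers pass additional invalidate bits via flags.
 */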
1693 static int mi_flush_dw(struct i915_request *rq, u32 flags)
1694 {
1695         u32 cmd, *cs;
1696
1697         cs = intel_ring_begin(rq, 4);
1698         if (IS_ERR(cs))
1699                 return PTR_ERR(cs);
1700
1701         cmd = MI_FLUSH_DW;
1702
1703         /*
1704          * We always require a command barrier so that subsequent
1705          * commands, such as breadcrumb interrupts, are strictly ordered
1706          * wrt the contents of the write cache being flushed to memory
1707          * (and thus being coherent from the CPU).
1708          */
1709         cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1710
1711         /*
1712          * Bspec vol 1c.3 - blitter engine command streamer:
1713          * "If ENABLED, all TLBs will be invalidated once the flush
1714          * operation is complete. This bit is only valid when the
1715          * Post-Sync Operation field is a value of 1h or 3h."
1716          */
1717         cmd |= flags;
1718
1719         *cs++ = cmd;
1720         *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
1721         *cs++ = 0;
1722         *cs++ = MI_NOOP;
1723
1724         intel_ring_advance(rq, cs);
1725
1726         return 0;
1727 }
1728
1729 static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
1730 {
1731         return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
1732 }
1733
1734 static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
1735 {
1736         return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
1737 }
1738
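/*
 * Start a batch buffer on Haswell: unless the caller asked for a secure
 * dispatch, run it from the ppGTT as a non-privileged batch.
 */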
1739 static int
1740 hsw_emit_bb_start(struct i915_request *rq,
1741                   u64 offset, u32 len,
1742                   unsigned int dispatch_flags)
1743 {
1744         u32 *cs;
1745
1746         cs = intel_ring_begin(rq, 2);
1747         if (IS_ERR(cs))
1748                 return PTR_ERR(cs);
1749
1750         *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
1751                 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
1752         /* bits 0-7 carry the length on GEN6+ */
1753         *cs++ = offset;
1754         intel_ring_advance(rq, cs);
1755
1756         return 0;
1757 }
1758
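/* gen6/gen7 batch start: non-secure dispatches set MI_BATCH_NON_SECURE_I965. */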
1759 static int
1760 gen6_emit_bb_start(struct i915_request *rq,
1761                    u64 offset, u32 len,
1762                    unsigned int dispatch_flags)
1763 {
1764         u32 *cs;
1765
1766         cs = intel_ring_begin(rq, 2);
1767         if (IS_ERR(cs))
1768                 return PTR_ERR(cs);
1769
1770         *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
1771                 0 : MI_BATCH_NON_SECURE_I965);
1772         /* bits 0-7 carry the length on GEN6+ */
1773         *cs++ = offset;
1774         intel_ring_advance(rq, cs);
1775
1776         return 0;
1777 }
1778
1779 /* Blitter support (SandyBridge+) */
1780
1781 static int gen6_ring_flush(struct i915_request *rq, u32 mode)
1782 {
1783         return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
1784 }
1785
1786 static void i9xx_set_default_submission(struct intel_engine_cs *engine)
1787 {
1788         engine->submit_request = i9xx_submit_request;
1789
1790         engine->park = NULL;
1791         engine->unpark = NULL;
1792 }
1793
1794 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
1795 {
1796         i9xx_set_default_submission(engine);
1797         engine->submit_request = gen6_bsd_submit_request;
1798 }
1799
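/*
 * engine->release for ring submission: warn if the engine is still busy,
 * tear down the common engine state and drop the ring and timeline that
 * were pinned in intel_ring_submission_setup().
 */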
1800 static void ring_release(struct intel_engine_cs *engine)
1801 {
1802         struct drm_i915_private *dev_priv = engine->i915;
1803
1804         WARN_ON(INTEL_GEN(dev_priv) > 2 &&
1805                 (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
1806
1807         intel_engine_cleanup_common(engine);
1808
1809         intel_ring_unpin(engine->legacy.ring);
1810         intel_ring_put(engine->legacy.ring);
1811
1812         intel_timeline_unpin(engine->legacy.timeline);
1813         intel_timeline_put(engine->legacy.timeline);
1814 }
1815
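/* Select the interrupt enable/disable hooks appropriate for this gen. */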
1816 static void setup_irq(struct intel_engine_cs *engine)
1817 {
1818         struct drm_i915_private *i915 = engine->i915;
1819
1820         if (INTEL_GEN(i915) >= 6) {
1821                 engine->irq_enable = gen6_irq_enable;
1822                 engine->irq_disable = gen6_irq_disable;
1823         } else if (INTEL_GEN(i915) >= 5) {
1824                 engine->irq_enable = gen5_irq_enable;
1825                 engine->irq_disable = gen5_irq_disable;
1826         } else if (INTEL_GEN(i915) >= 3) {
1827                 engine->irq_enable = i9xx_irq_enable;
1828                 engine->irq_disable = i9xx_irq_disable;
1829         } else {
1830                 engine->irq_enable = i8xx_irq_enable;
1831                 engine->irq_disable = i8xx_irq_disable;
1832         }
1833 }
1834
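/*
 * Defaults shared by all legacy ring engines: reset and resume hooks,
 * context ops, request_alloc and the generation-specific breadcrumb and
 * batch buffer emitters.
 */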
1835 static void setup_common(struct intel_engine_cs *engine)
1836 {
1837         struct drm_i915_private *i915 = engine->i915;
1838
1839         /* gen8+ are only supported with execlists */
1840         GEM_BUG_ON(INTEL_GEN(i915) >= 8);
1841
1842         setup_irq(engine);
1843
1844         engine->resume = xcs_resume;
1845         engine->reset.prepare = reset_prepare;
1846         engine->reset.rewind = reset_rewind;
1847         engine->reset.cancel = reset_cancel;
1848         engine->reset.finish = reset_finish;
1849
1850         engine->cops = &ring_context_ops;
1851         engine->request_alloc = ring_request_alloc;
1852
1853         /*
1854          * Using a global execution timeline; the previous final breadcrumb is
1855          * equivalent to our next initial breadcrumb, so we can elide
1856          * engine->emit_init_breadcrumb().
1857          */
1858         engine->emit_fini_breadcrumb = i9xx_emit_breadcrumb;
1859         if (IS_GEN(i915, 5))
1860                 engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
1861
1862         engine->set_default_submission = i9xx_set_default_submission;
1863
1864         if (INTEL_GEN(i915) >= 6)
1865                 engine->emit_bb_start = gen6_emit_bb_start;
1866         else if (INTEL_GEN(i915) >= 4)
1867                 engine->emit_bb_start = i965_emit_bb_start;
1868         else if (IS_I830(i915) || IS_I845G(i915))
1869                 engine->emit_bb_start = i830_emit_bb_start;
1870         else
1871                 engine->emit_bb_start = i915_emit_bb_start;
1872 }
1873
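/*
 * Render engine specifics: L3-parity interrupt handling plus the
 * per-generation flush, breadcrumb and batch-start hooks.
 */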
1874 static void setup_rcs(struct intel_engine_cs *engine)
1875 {
1876         struct drm_i915_private *i915 = engine->i915;
1877
1878         if (HAS_L3_DPF(i915))
1879                 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
1880
1881         engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1882
1883         if (INTEL_GEN(i915) >= 7) {
1884                 engine->emit_flush = gen7_render_ring_flush;
1885                 engine->emit_fini_breadcrumb = gen7_rcs_emit_breadcrumb;
1886         } else if (IS_GEN(i915, 6)) {
1887                 engine->emit_flush = gen6_render_ring_flush;
1888                 engine->emit_fini_breadcrumb = gen6_rcs_emit_breadcrumb;
1889         } else if (IS_GEN(i915, 5)) {
1890                 engine->emit_flush = gen4_render_ring_flush;
1891         } else {
1892                 if (INTEL_GEN(i915) < 4)
1893                         engine->emit_flush = gen2_render_ring_flush;
1894                 else
1895                         engine->emit_flush = gen4_render_ring_flush;
1896                 engine->irq_enable_mask = I915_USER_INTERRUPT;
1897         }
1898
1899         if (IS_HASWELL(i915))
1900                 engine->emit_bb_start = hsw_emit_bb_start;
1901
1902         engine->resume = rcs_resume;
1903 }
1904
1905 static void setup_vcs(struct intel_engine_cs *engine)
1906 {
1907         struct drm_i915_private *i915 = engine->i915;
1908
1909         if (INTEL_GEN(i915) >= 6) {
1910                 /* gen6 bsd needs a special wa for tail updates */
1911                 if (IS_GEN(i915, 6))
1912                         engine->set_default_submission = gen6_bsd_set_default_submission;
1913                 engine->emit_flush = gen6_bsd_ring_flush;
1914                 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1915
1916                 if (IS_GEN(i915, 6))
1917                         engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
1918                 else
1919                         engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
1920         } else {
1921                 engine->emit_flush = bsd_ring_flush;
1922                 if (IS_GEN(i915, 5))
1923                         engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
1924                 else
1925                         engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
1926         }
1927 }
1928
1929 static void setup_bcs(struct intel_engine_cs *engine)
1930 {
1931         struct drm_i915_private *i915 = engine->i915;
1932
1933         engine->emit_flush = gen6_ring_flush;
1934         engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
1935
1936         if (IS_GEN(i915, 6))
1937                 engine->emit_fini_breadcrumb = gen6_xcs_emit_breadcrumb;
1938         else
1939                 engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
1940 }
1941
1942 static void setup_vecs(struct intel_engine_cs *engine)
1943 {
1944         struct drm_i915_private *i915 = engine->i915;
1945
1946         GEM_BUG_ON(INTEL_GEN(i915) < 7);
1947
1948         engine->emit_flush = gen6_ring_flush;
1949         engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
1950         engine->irq_enable = hsw_vebox_irq_enable;
1951         engine->irq_disable = hsw_vebox_irq_disable;
1952
1953         engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
1954 }
1955
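/*
 * Final setup for legacy ring submission: install the common and
 * class-specific vfuncs, create and pin a global timeline backed by the
 * engine's status page along with a 16KiB ring, and hand cleanup over to
 * ring_release().
 */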
1956 int intel_ring_submission_setup(struct intel_engine_cs *engine)
1957 {
1958         struct intel_timeline *timeline;
1959         struct intel_ring *ring;
1960         int err;
1961
1962         setup_common(engine);
1963
1964         switch (engine->class) {
1965         case RENDER_CLASS:
1966                 setup_rcs(engine);
1967                 break;
1968         case VIDEO_DECODE_CLASS:
1969                 setup_vcs(engine);
1970                 break;
1971         case COPY_ENGINE_CLASS:
1972                 setup_bcs(engine);
1973                 break;
1974         case VIDEO_ENHANCEMENT_CLASS:
1975                 setup_vecs(engine);
1976                 break;
1977         default:
1978                 MISSING_CASE(engine->class);
1979                 return -ENODEV;
1980         }
1981
1982         timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
1983         if (IS_ERR(timeline)) {
1984                 err = PTR_ERR(timeline);
1985                 goto err;
1986         }
1987         GEM_BUG_ON(timeline->has_initial_breadcrumb);
1988
1989         err = intel_timeline_pin(timeline);
1990         if (err)
1991                 goto err_timeline;
1992
1993         ring = intel_engine_create_ring(engine, SZ_16K);
1994         if (IS_ERR(ring)) {
1995                 err = PTR_ERR(ring);
1996                 goto err_timeline_unpin;
1997         }
1998
1999         err = intel_ring_pin(ring);
2000         if (err)
2001                 goto err_ring;
2002
2003         GEM_BUG_ON(engine->legacy.ring);
2004         engine->legacy.ring = ring;
2005         engine->legacy.timeline = timeline;
2006
2007         GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
2008
2009         /* Finally, take ownership and responsibility for cleanup! */
2010         engine->release = ring_release;
2011
2012         return 0;
2013
2014 err_ring:
2015         intel_ring_put(ring);
2016 err_timeline_unpin:
2017         intel_timeline_unpin(timeline);
2018 err_timeline:
2019         intel_timeline_put(timeline);
2020 err:
2021         intel_engine_cleanup_common(engine);
2022         return err;
2023 }