drivers/gpu/drm/i915/intel_ringbuffer.c
1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *    Zou Nan hai <nanhai.zou@intel.com>
26  *    Xiang Hai hao <haihao.xiang@intel.com>
27  *
28  */
29
30 #include <linux/log2.h>
31
32 #include <drm/drmP.h>
33 #include <drm/i915_drm.h>
34
35 #include "i915_drv.h"
36 #include "i915_gem_render_state.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39 #include "intel_workarounds.h"
40
41 /* Rough estimate of the typical request size, performing a flush,
42  * set-context and then emitting the batch.
43  */
44 #define LEGACY_REQUEST_SIZE 200
45
46 static unsigned int __intel_ring_space(unsigned int head,
47                                        unsigned int tail,
48                                        unsigned int size)
49 {
50         /*
51          * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
52          * same cacheline, the Head Pointer must not be greater than the Tail
53          * Pointer."
54          */
55         GEM_BUG_ON(!is_power_of_2(size));
56         return (head - tail - CACHELINE_BYTES) & (size - 1);
57 }
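/*
 * Worked example, for illustration: with a 4096 byte ring (a power of
 * two), CACHELINE_BYTES == 64, head == 0x100 and tail == 0x800, the
 * space reported is
 *     (0x100 - 0x800 - 64) & (4096 - 1) == 2240 bytes,
 * i.e. the 2048 bytes from tail to the end of the ring plus the 256
 * bytes before head, minus the one cacheline kept in reserve so that
 * head and tail never end up on the same cacheline.
 */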
58
59 unsigned int intel_ring_update_space(struct intel_ring *ring)
60 {
61         unsigned int space;
62
63         space = __intel_ring_space(ring->head, ring->emit, ring->size);
64
65         ring->space = space;
66         return space;
67 }
68
69 static int
70 gen2_render_ring_flush(struct i915_request *rq, u32 mode)
71 {
72         u32 cmd, *cs;
73
74         cmd = MI_FLUSH;
75
76         if (mode & EMIT_INVALIDATE)
77                 cmd |= MI_READ_FLUSH;
78
79         cs = intel_ring_begin(rq, 2);
80         if (IS_ERR(cs))
81                 return PTR_ERR(cs);
82
83         *cs++ = cmd;
84         *cs++ = MI_NOOP;
85         intel_ring_advance(rq, cs);
86
87         return 0;
88 }
89
90 static int
91 gen4_render_ring_flush(struct i915_request *rq, u32 mode)
92 {
93         u32 cmd, *cs;
94         int i;
95
96         /*
97          * read/write caches:
98          *
99          * I915_GEM_DOMAIN_RENDER is always invalidated, but is
100          * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
101          * also flushed at 2d versus 3d pipeline switches.
102          *
103          * read-only caches:
104          *
105          * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
106          * MI_READ_FLUSH is set, and is always flushed on 965.
107          *
108          * I915_GEM_DOMAIN_COMMAND may not exist?
109          *
110          * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
111          * invalidated when MI_EXE_FLUSH is set.
112          *
113          * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
114          * invalidated with every MI_FLUSH.
115          *
116          * TLBs:
117          *
118          * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
119          * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
120          * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
121          * are flushed at any MI_FLUSH.
122          */
123
124         cmd = MI_FLUSH;
125         if (mode & EMIT_INVALIDATE) {
126                 cmd |= MI_EXE_FLUSH;
127                 if (IS_G4X(rq->i915) || IS_GEN5(rq->i915))
128                         cmd |= MI_INVALIDATE_ISP;
129         }
130
131         i = 2;
132         if (mode & EMIT_INVALIDATE)
133                 i += 20;
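        /*
         * Dword budget: the base of 2 covers the two cmd dwords emitted
         * below, and the extra 20 covers the invalidate workaround (two
         * 4-dword PIPE_CONTROLs wrapped around twelve MI_FLUSHes).
         */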
134
135         cs = intel_ring_begin(rq, i);
136         if (IS_ERR(cs))
137                 return PTR_ERR(cs);
138
139         *cs++ = cmd;
140
141         /*
142          * A random delay to let the CS invalidate take effect? Without this
143          * delay, the GPU relocation path fails as the CS does not see
144          * the updated contents. Just as important, if we apply the flushes
145          * to the EMIT_FLUSH branch (i.e. immediately after the relocation
146          * write and before the invalidate on the next batch), the relocations
147          * still fail. This implies that there is a delay following invalidation
148          * that is required to reset the caches as opposed to a delay to
149          * ensure the memory is written.
150          */
151         if (mode & EMIT_INVALIDATE) {
152                 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
153                 *cs++ = i915_ggtt_offset(rq->engine->scratch) |
154                         PIPE_CONTROL_GLOBAL_GTT;
155                 *cs++ = 0;
156                 *cs++ = 0;
157
158                 for (i = 0; i < 12; i++)
159                         *cs++ = MI_FLUSH;
160
161                 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
162                 *cs++ = i915_ggtt_offset(rq->engine->scratch) |
163                         PIPE_CONTROL_GLOBAL_GTT;
164                 *cs++ = 0;
165                 *cs++ = 0;
166         }
167
168         *cs++ = cmd;
169
170         intel_ring_advance(rq, cs);
171
172         return 0;
173 }
174
175 /*
176  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
177  * implementing two workarounds on gen6.  From section 1.4.7.1
178  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
179  *
180  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
181  * produced by non-pipelined state commands), software needs to first
182  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
183  * 0.
184  *
185  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
186  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
187  *
188  * And the workaround for these two requires this workaround first:
189  *
190  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
191  * BEFORE the pipe-control with a post-sync op and no write-cache
192  * flushes.
193  *
194  * And this last workaround is tricky because of the requirements on
195  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
196  * volume 2 part 1:
197  *
198  *     "1 of the following must also be set:
199  *      - Render Target Cache Flush Enable ([12] of DW1)
200  *      - Depth Cache Flush Enable ([0] of DW1)
201  *      - Stall at Pixel Scoreboard ([1] of DW1)
202  *      - Depth Stall ([13] of DW1)
203  *      - Post-Sync Operation ([13] of DW1)
204  *      - Notify Enable ([8] of DW1)"
205  *
206  * The cache flushes require the workaround flush that triggered this
207  * one, so we can't use it.  Depth stall would trigger the same.
208  * Post-sync nonzero is what triggered this second workaround, so we
209  * can't use that one either.  Notify enable is IRQs, which aren't
210  * really our business.  That leaves only stall at scoreboard.
211  */
212 static int
213 intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
214 {
215         u32 scratch_addr =
216                 i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
217         u32 *cs;
218
219         cs = intel_ring_begin(rq, 6);
220         if (IS_ERR(cs))
221                 return PTR_ERR(cs);
222
223         *cs++ = GFX_OP_PIPE_CONTROL(5);
224         *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
225         *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
226         *cs++ = 0; /* low dword */
227         *cs++ = 0; /* high dword */
228         *cs++ = MI_NOOP;
229         intel_ring_advance(rq, cs);
230
231         cs = intel_ring_begin(rq, 6);
232         if (IS_ERR(cs))
233                 return PTR_ERR(cs);
234
235         *cs++ = GFX_OP_PIPE_CONTROL(5);
236         *cs++ = PIPE_CONTROL_QW_WRITE;
237         *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
238         *cs++ = 0;
239         *cs++ = 0;
240         *cs++ = MI_NOOP;
241         intel_ring_advance(rq, cs);
242
243         return 0;
244 }
245
246 static int
247 gen6_render_ring_flush(struct i915_request *rq, u32 mode)
248 {
249         u32 scratch_addr =
250                 i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
251         u32 *cs, flags = 0;
252         int ret;
253
254         /* Force SNB workarounds for PIPE_CONTROL flushes */
255         ret = intel_emit_post_sync_nonzero_flush(rq);
256         if (ret)
257                 return ret;
258
259         /* Just flush everything.  Experiments have shown that reducing the
260          * number of bits based on the write domains has little performance
261          * impact.
262          */
263         if (mode & EMIT_FLUSH) {
264                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
265                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
266                 /*
267                  * Ensure that any following seqno writes only happen
268                  * when the render cache is indeed flushed.
269                  */
270                 flags |= PIPE_CONTROL_CS_STALL;
271         }
272         if (mode & EMIT_INVALIDATE) {
273                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
274                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
275                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
276                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
277                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
278                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
279                 /*
280                  * TLB invalidate requires a post-sync write.
281                  */
282                 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
283         }
284
285         cs = intel_ring_begin(rq, 4);
286         if (IS_ERR(cs))
287                 return PTR_ERR(cs);
288
289         *cs++ = GFX_OP_PIPE_CONTROL(4);
290         *cs++ = flags;
291         *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
292         *cs++ = 0;
293         intel_ring_advance(rq, cs);
294
295         return 0;
296 }
297
298 static int
299 gen7_render_ring_cs_stall_wa(struct i915_request *rq)
300 {
301         u32 *cs;
302
303         cs = intel_ring_begin(rq, 4);
304         if (IS_ERR(cs))
305                 return PTR_ERR(cs);
306
307         *cs++ = GFX_OP_PIPE_CONTROL(4);
308         *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
309         *cs++ = 0;
310         *cs++ = 0;
311         intel_ring_advance(rq, cs);
312
313         return 0;
314 }
315
316 static int
317 gen7_render_ring_flush(struct i915_request *rq, u32 mode)
318 {
319         u32 scratch_addr =
320                 i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
321         u32 *cs, flags = 0;
322
323         /*
324          * Ensure that any following seqno writes only happen when the render
325          * cache is indeed flushed.
326          *
327          * Workaround: 4th PIPE_CONTROL command (except the ones with only
328          * read-cache invalidate bits set) must have the CS_STALL bit set. We
329          * don't try to be clever and just set it unconditionally.
330          */
331         flags |= PIPE_CONTROL_CS_STALL;
332
333         /* Just flush everything.  Experiments have shown that reducing the
334          * number of bits based on the write domains has little performance
335          * impact.
336          */
337         if (mode & EMIT_FLUSH) {
338                 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
339                 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
340                 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
341                 flags |= PIPE_CONTROL_FLUSH_ENABLE;
342         }
343         if (mode & EMIT_INVALIDATE) {
344                 flags |= PIPE_CONTROL_TLB_INVALIDATE;
345                 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
346                 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
347                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
348                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
349                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
350                 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
351                 /*
352                  * TLB invalidate requires a post-sync write.
353                  */
354                 flags |= PIPE_CONTROL_QW_WRITE;
355                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
356
357                 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
358
359                 /* Workaround: we must issue a pipe_control with CS-stall bit
360                  * set before a pipe_control command that has the state cache
361                  * invalidate bit set. */
362                 gen7_render_ring_cs_stall_wa(rq);
363         }
364
365         cs = intel_ring_begin(rq, 4);
366         if (IS_ERR(cs))
367                 return PTR_ERR(cs);
368
369         *cs++ = GFX_OP_PIPE_CONTROL(4);
370         *cs++ = flags;
371         *cs++ = scratch_addr;
372         *cs++ = 0;
373         intel_ring_advance(rq, cs);
374
375         return 0;
376 }
377
378 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
379 {
380         struct drm_i915_private *dev_priv = engine->i915;
381         struct page *page = virt_to_page(engine->status_page.page_addr);
382         phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
383         u32 addr;
384
385         addr = lower_32_bits(phys);
386         if (INTEL_GEN(dev_priv) >= 4)
387                 addr |= (phys >> 28) & 0xf0;
388
389         I915_WRITE(HWS_PGA, addr);
390 }
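/*
 * A note on the packing above: the value written to HWS_PGA is the low
 * 32 bits of the status page's physical address, and on gen4+ the code
 * also folds bits [35:32] of that address into bits [7:4] of the
 * register value via (phys >> 28) & 0xf0, so the page may sit above
 * 4GiB on parts with 36-bit physical addressing.
 */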
391
392 static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
393 {
394         struct drm_i915_private *dev_priv = engine->i915;
395         i915_reg_t mmio;
396
397         /* The ring status page addresses are no longer next to the rest of
398          * the ring registers as of gen7.
399          */
400         if (IS_GEN7(dev_priv)) {
401                 switch (engine->id) {
402                 /*
403                  * No more rings exist on Gen7. Default case is only to shut up
404                  * gcc switch check warning.
405                  */
406                 default:
407                         GEM_BUG_ON(engine->id);
408                 case RCS:
409                         mmio = RENDER_HWS_PGA_GEN7;
410                         break;
411                 case BCS:
412                         mmio = BLT_HWS_PGA_GEN7;
413                         break;
414                 case VCS:
415                         mmio = BSD_HWS_PGA_GEN7;
416                         break;
417                 case VECS:
418                         mmio = VEBOX_HWS_PGA_GEN7;
419                         break;
420                 }
421         } else if (IS_GEN6(dev_priv)) {
422                 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
423         } else {
424                 mmio = RING_HWS_PGA(engine->mmio_base);
425         }
426
427         if (INTEL_GEN(dev_priv) >= 6) {
428                 u32 mask = ~0u;
429
430                 /*
431                  * Keep the render interrupt unmasked as this papers over
432                  * lost interrupts following a reset.
433                  */
434                 if (engine->id == RCS)
435                         mask &= ~BIT(0);
436
437                 I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
438         }
439
440         I915_WRITE(mmio, engine->status_page.ggtt_offset);
441         POSTING_READ(mmio);
442
443         /* Flush the TLB for this page */
444         if (IS_GEN(dev_priv, 6, 7)) {
445                 i915_reg_t reg = RING_INSTPM(engine->mmio_base);
446
447                 /* The ring should be idle before issuing a sync flush */
448                 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
449
450                 I915_WRITE(reg,
451                            _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
452                                               INSTPM_SYNC_FLUSH));
453                 if (intel_wait_for_register(dev_priv,
454                                             reg, INSTPM_SYNC_FLUSH, 0,
455                                             1000))
456                         DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
457                                   engine->name);
458         }
459 }
460
461 static bool stop_ring(struct intel_engine_cs *engine)
462 {
463         struct drm_i915_private *dev_priv = engine->i915;
464
465         if (INTEL_GEN(dev_priv) > 2) {
466                 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
467                 if (intel_wait_for_register(dev_priv,
468                                             RING_MI_MODE(engine->mmio_base),
469                                             MODE_IDLE,
470                                             MODE_IDLE,
471                                             1000)) {
472                         DRM_ERROR("%s : timed out trying to stop ring\n",
473                                   engine->name);
474                         /* Sometimes we observe that the idle flag is not
475                          * set even though the ring is empty. So double
476                          * check before giving up.
477                          */
478                         if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
479                                 return false;
480                 }
481         }
482
483         I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));
484
485         I915_WRITE_HEAD(engine, 0);
486         I915_WRITE_TAIL(engine, 0);
487
488         /* The ring must be empty before it is disabled */
489         I915_WRITE_CTL(engine, 0);
490
491         return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
492 }
493
494 static int init_ring_common(struct intel_engine_cs *engine)
495 {
496         struct drm_i915_private *dev_priv = engine->i915;
497         struct intel_ring *ring = engine->buffer;
498         int ret = 0;
499
500         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
501
502         if (!stop_ring(engine)) {
503                 /* G45 ring initialization often fails to reset head to zero */
504                 DRM_DEBUG_DRIVER("%s head not reset to zero "
505                                 "ctl %08x head %08x tail %08x start %08x\n",
506                                 engine->name,
507                                 I915_READ_CTL(engine),
508                                 I915_READ_HEAD(engine),
509                                 I915_READ_TAIL(engine),
510                                 I915_READ_START(engine));
511
512                 if (!stop_ring(engine)) {
513                         DRM_ERROR("failed to set %s head to zero "
514                                   "ctl %08x head %08x tail %08x start %08x\n",
515                                   engine->name,
516                                   I915_READ_CTL(engine),
517                                   I915_READ_HEAD(engine),
518                                   I915_READ_TAIL(engine),
519                                   I915_READ_START(engine));
520                         ret = -EIO;
521                         goto out;
522                 }
523         }
524
525         if (HWS_NEEDS_PHYSICAL(dev_priv))
526                 ring_setup_phys_status_page(engine);
527         else
528                 intel_ring_setup_status_page(engine);
529
530         intel_engine_reset_breadcrumbs(engine);
531
532         /* Enforce ordering by reading HEAD register back */
533         I915_READ_HEAD(engine);
534
535         /* Initialize the ring. This must happen _after_ we've cleared the ring
536          * registers with the above sequence (the readback of the HEAD registers
537          * also enforces ordering), otherwise the hw might lose the new ring
538          * register values. */
539         I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
540
541         /* WaClearRingBufHeadRegAtInit:ctg,elk */
542         if (I915_READ_HEAD(engine))
543                 DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
544                                  engine->name, I915_READ_HEAD(engine));
545
546         /* Check that the ring offsets point within the ring! */
547         GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
548         GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
549
550         intel_ring_update_space(ring);
551         I915_WRITE_HEAD(engine, ring->head);
552         I915_WRITE_TAIL(engine, ring->tail);
553         (void)I915_READ_TAIL(engine);
554
555         I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
556
557         /* If the head is still not zero, the ring is dead */
558         if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
559                                     RING_VALID, RING_VALID,
560                                     50)) {
561                 DRM_ERROR("%s initialization failed "
562                           "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
563                           engine->name,
564                           I915_READ_CTL(engine),
565                           I915_READ_CTL(engine) & RING_VALID,
566                           I915_READ_HEAD(engine), ring->head,
567                           I915_READ_TAIL(engine), ring->tail,
568                           I915_READ_START(engine),
569                           i915_ggtt_offset(ring->vma));
570                 ret = -EIO;
571                 goto out;
572         }
573
574         if (INTEL_GEN(dev_priv) > 2)
575                 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
576
577         /* Papering over lost _interrupts_ immediately following the restart */
578         intel_engine_wakeup(engine);
579 out:
580         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
581
582         return ret;
583 }
584
585 static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
586 {
587         intel_engine_stop_cs(engine);
588
589         if (engine->irq_seqno_barrier)
590                 engine->irq_seqno_barrier(engine);
591
592         return i915_gem_find_active_request(engine);
593 }
594
595 static void skip_request(struct i915_request *rq)
596 {
597         void *vaddr = rq->ring->vaddr;
598         u32 head;
599
600         head = rq->infix;
601         if (rq->postfix < head) {
602                 memset32(vaddr + head, MI_NOOP,
603                          (rq->ring->size - head) / sizeof(u32));
604                 head = 0;
605         }
606         memset32(vaddr + head, MI_NOOP, (rq->postfix - head) / sizeof(u32));
607 }
608
609 static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
610 {
611         GEM_TRACE("%s seqno=%x\n", engine->name, rq ? rq->global_seqno : 0);
612
613         /*
614          * Try to restore the logical GPU state to match the continuation
615          * of the request queue. If we skip the context/PD restore, then
616          * the next request may try to execute assuming that its context
617          * is valid and loaded on the GPU and so may try to access invalid
618          * memory, prompting repeated GPU hangs.
619          *
620          * If the request was guilty, we still restore the logical state
621          * in case the next request requires it (e.g. the aliasing ppgtt),
622          * but skip over the hung batch.
623          *
624          * If the request was innocent, we try to replay the request with
625          * the restored context.
626          */
627         if (rq) {
628                 /* If the rq hung, jump to its breadcrumb and skip the batch */
629                 rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
630                 if (rq->fence.error == -EIO)
631                         skip_request(rq);
632         }
633 }
634
635 static void reset_finish(struct intel_engine_cs *engine)
636 {
637 }
638
639 static int intel_rcs_ctx_init(struct i915_request *rq)
640 {
641         int ret;
642
643         ret = intel_ctx_workarounds_emit(rq);
644         if (ret != 0)
645                 return ret;
646
647         ret = i915_gem_render_state_emit(rq);
648         if (ret)
649                 return ret;
650
651         return 0;
652 }
653
654 static int init_render_ring(struct intel_engine_cs *engine)
655 {
656         struct drm_i915_private *dev_priv = engine->i915;
657         int ret = init_ring_common(engine);
658         if (ret)
659                 return ret;
660
661         intel_whitelist_workarounds_apply(engine);
662
663         /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
664         if (IS_GEN(dev_priv, 4, 6))
665                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
666
667         /* We need to disable the AsyncFlip performance optimisations in order
668          * to use MI_WAIT_FOR_EVENT within the CS. It should already be
669          * programmed to '1' on all products.
670          *
671          * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
672          */
673         if (IS_GEN(dev_priv, 6, 7))
674                 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
675
676         /* Required for the hardware to program scanline values for waiting */
677         /* WaEnableFlushTlbInvalidationMode:snb */
678         if (IS_GEN6(dev_priv))
679                 I915_WRITE(GFX_MODE,
680                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
681
682         /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
683         if (IS_GEN7(dev_priv))
684                 I915_WRITE(GFX_MODE_GEN7,
685                            _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
686                            _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
687
688         if (IS_GEN6(dev_priv)) {
689                 /* From the Sandybridge PRM, volume 1 part 3, page 24:
690                  * "If this bit is set, STCunit will have LRA as replacement
691                  *  policy. [...] This bit must be reset.  LRA replacement
692                  *  policy is not supported."
693                  */
694                 I915_WRITE(CACHE_MODE_0,
695                            _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
696         }
697
698         if (IS_GEN(dev_priv, 6, 7))
699                 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
700
701         if (INTEL_GEN(dev_priv) >= 6)
702                 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
703
704         return 0;
705 }
706
707 static u32 *gen6_signal(struct i915_request *rq, u32 *cs)
708 {
709         struct drm_i915_private *dev_priv = rq->i915;
710         struct intel_engine_cs *engine;
711         enum intel_engine_id id;
712         int num_rings = 0;
713
714         for_each_engine(engine, dev_priv, id) {
715                 i915_reg_t mbox_reg;
716
717                 if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
718                         continue;
719
720                 mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id];
721                 if (i915_mmio_reg_valid(mbox_reg)) {
722                         *cs++ = MI_LOAD_REGISTER_IMM(1);
723                         *cs++ = i915_mmio_reg_offset(mbox_reg);
724                         *cs++ = rq->global_seqno;
725                         num_rings++;
726                 }
727         }
728         if (num_rings & 1)
729                 *cs++ = MI_NOOP;
730
731         return cs;
732 }
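/*
 * Each mailbox signalled above costs three dwords (LRI header, register
 * offset, seqno), so the trailing MI_NOOP for an odd number of rings
 * keeps the overall dword count even; the ring tail must remain qword
 * aligned.
 */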
733
734 static void cancel_requests(struct intel_engine_cs *engine)
735 {
736         struct i915_request *request;
737         unsigned long flags;
738
739         spin_lock_irqsave(&engine->timeline.lock, flags);
740
741         /* Mark all submitted requests as skipped. */
742         list_for_each_entry(request, &engine->timeline.requests, link) {
743                 GEM_BUG_ON(!request->global_seqno);
744                 if (!i915_request_completed(request))
745                         dma_fence_set_error(&request->fence, -EIO);
746         }
747         /* Remaining _unready_ requests will be nop'ed when submitted */
748
749         spin_unlock_irqrestore(&engine->timeline.lock, flags);
750 }
751
752 static void i9xx_submit_request(struct i915_request *request)
753 {
754         struct drm_i915_private *dev_priv = request->i915;
755
756         i915_request_submit(request);
757
758         I915_WRITE_TAIL(request->engine,
759                         intel_ring_set_tail(request->ring, request->tail));
760 }
761
762 static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
763 {
764         *cs++ = MI_STORE_DWORD_INDEX;
765         *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
766         *cs++ = rq->global_seqno;
767         *cs++ = MI_USER_INTERRUPT;
768
769         rq->tail = intel_ring_offset(rq, cs);
770         assert_ring_tail_valid(rq->ring, rq->tail);
771 }
772
773 static const int i9xx_emit_breadcrumb_sz = 4;
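/*
 * i9xx_emit_breadcrumb_sz must match the four dwords written by
 * i9xx_emit_breadcrumb() above: that many dwords are reserved when the
 * request is allocated, and the breadcrumb consumes exactly that
 * reservation.
 */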
774
775 static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs)
776 {
777         return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs));
778 }
779
780 static int
781 gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal)
782 {
783         u32 dw1 = MI_SEMAPHORE_MBOX |
784                   MI_SEMAPHORE_COMPARE |
785                   MI_SEMAPHORE_REGISTER;
786         u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id];
787         u32 *cs;
788
789         WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
790
791         cs = intel_ring_begin(rq, 4);
792         if (IS_ERR(cs))
793                 return PTR_ERR(cs);
794
795         *cs++ = dw1 | wait_mbox;
796         /* Throughout all of the GEM code, seqno passed implies our current
797          * seqno is >= the last seqno executed. However for hardware the
798          * comparison is strictly greater than.
799          */
800         *cs++ = signal->global_seqno - 1;
801         *cs++ = 0;
802         *cs++ = MI_NOOP;
803         intel_ring_advance(rq, cs);
804
805         return 0;
806 }
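/*
 * Example of the off-by-one above: to wait on a request whose
 * global_seqno is 100, we program the semaphore to compare against 99;
 * the hardware's strictly-greater-than test then succeeds once the
 * signaller's mailbox holds 100 or anything later, giving the ">="
 * semantics the rest of the GEM code expects.
 */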
807
808 static void
809 gen5_seqno_barrier(struct intel_engine_cs *engine)
810 {
811         /* MI_STORE commands are internally buffered by the GPU and not flushed
812          * either by MI_FLUSH or SyncFlush or any other combination of
813          * MI commands.
814          *
815          * "Only the submission of the store operation is guaranteed.
816          * The write result will be complete (coherent) some time later
817          * (this is practically a finite period but there is no guaranteed
818          * latency)."
819          *
820          * Empirically, we observe that we need a delay of at least 75us to
821          * be sure that the seqno write is visible to the CPU.
822          */
823         usleep_range(125, 250);
824 }
825
826 static void
827 gen6_seqno_barrier(struct intel_engine_cs *engine)
828 {
829         struct drm_i915_private *dev_priv = engine->i915;
830
831         /* Workaround to force correct ordering between irq and seqno writes on
832          * ivb (and maybe also on snb) by reading from a CS register (like
833          * ACTHD) before reading the status page.
834          *
835          * Note that this effectively stalls the read by the time it takes to
836          * do a memory transaction, which more or less ensures that the write
837          * from the GPU has sufficient time to invalidate the CPU cacheline.
838          * Alternatively we could delay the interrupt from the CS ring to give
839          * the write time to land, but that would incur a delay after every
840          * batch i.e. much more frequent than a delay when waiting for the
841          * interrupt (with the same net latency).
842          *
843          * Also note that to prevent whole machine hangs on gen7, we have to
844          * take the spinlock to guard against concurrent cacheline access.
845          */
846         spin_lock_irq(&dev_priv->uncore.lock);
847         POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
848         spin_unlock_irq(&dev_priv->uncore.lock);
849 }
850
851 static void
852 gen5_irq_enable(struct intel_engine_cs *engine)
853 {
854         gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
855 }
856
857 static void
858 gen5_irq_disable(struct intel_engine_cs *engine)
859 {
860         gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
861 }
862
863 static void
864 i9xx_irq_enable(struct intel_engine_cs *engine)
865 {
866         struct drm_i915_private *dev_priv = engine->i915;
867
868         dev_priv->irq_mask &= ~engine->irq_enable_mask;
869         I915_WRITE(IMR, dev_priv->irq_mask);
870         POSTING_READ_FW(RING_IMR(engine->mmio_base));
871 }
872
873 static void
874 i9xx_irq_disable(struct intel_engine_cs *engine)
875 {
876         struct drm_i915_private *dev_priv = engine->i915;
877
878         dev_priv->irq_mask |= engine->irq_enable_mask;
879         I915_WRITE(IMR, dev_priv->irq_mask);
880 }
881
882 static void
883 i8xx_irq_enable(struct intel_engine_cs *engine)
884 {
885         struct drm_i915_private *dev_priv = engine->i915;
886
887         dev_priv->irq_mask &= ~engine->irq_enable_mask;
888         I915_WRITE16(IMR, dev_priv->irq_mask);
889         POSTING_READ16(RING_IMR(engine->mmio_base));
890 }
891
892 static void
893 i8xx_irq_disable(struct intel_engine_cs *engine)
894 {
895         struct drm_i915_private *dev_priv = engine->i915;
896
897         dev_priv->irq_mask |= engine->irq_enable_mask;
898         I915_WRITE16(IMR, dev_priv->irq_mask);
899 }
900
901 static int
902 bsd_ring_flush(struct i915_request *rq, u32 mode)
903 {
904         u32 *cs;
905
906         cs = intel_ring_begin(rq, 2);
907         if (IS_ERR(cs))
908                 return PTR_ERR(cs);
909
910         *cs++ = MI_FLUSH;
911         *cs++ = MI_NOOP;
912         intel_ring_advance(rq, cs);
913         return 0;
914 }
915
916 static void
917 gen6_irq_enable(struct intel_engine_cs *engine)
918 {
919         struct drm_i915_private *dev_priv = engine->i915;
920
921         I915_WRITE_IMR(engine,
922                        ~(engine->irq_enable_mask |
923                          engine->irq_keep_mask));
924         gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
925 }
926
927 static void
928 gen6_irq_disable(struct intel_engine_cs *engine)
929 {
930         struct drm_i915_private *dev_priv = engine->i915;
931
932         I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
933         gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
934 }
935
936 static void
937 hsw_vebox_irq_enable(struct intel_engine_cs *engine)
938 {
939         struct drm_i915_private *dev_priv = engine->i915;
940
941         I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
942         gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
943 }
944
945 static void
946 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
947 {
948         struct drm_i915_private *dev_priv = engine->i915;
949
950         I915_WRITE_IMR(engine, ~0);
951         gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
952 }
953
954 static int
955 i965_emit_bb_start(struct i915_request *rq,
956                    u64 offset, u32 length,
957                    unsigned int dispatch_flags)
958 {
959         u32 *cs;
960
961         cs = intel_ring_begin(rq, 2);
962         if (IS_ERR(cs))
963                 return PTR_ERR(cs);
964
965         *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
966                 I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
967         *cs++ = offset;
968         intel_ring_advance(rq, cs);
969
970         return 0;
971 }
972
973 /* Just a userspace ABI convention to limit the wa batch bo to a reasonable size */
974 #define I830_BATCH_LIMIT (256*1024)
975 #define I830_TLB_ENTRIES (2)
976 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
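/*
 * With the values above, I830_WA_SIZE evaluates to
 * max(2 * 4096, 256 * 1024) == 256KiB: big enough both to scrub the two
 * stale TLB entries and to hold a full batch (up to I830_BATCH_LIMIT)
 * blitted into the scratch area below.
 */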
977 static int
978 i830_emit_bb_start(struct i915_request *rq,
979                    u64 offset, u32 len,
980                    unsigned int dispatch_flags)
981 {
982         u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
983
984         cs = intel_ring_begin(rq, 6);
985         if (IS_ERR(cs))
986                 return PTR_ERR(cs);
987
988         /* Evict the invalid PTE TLBs */
989         *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
990         *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
991         *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
992         *cs++ = cs_offset;
993         *cs++ = 0xdeadbeef;
994         *cs++ = MI_NOOP;
995         intel_ring_advance(rq, cs);
996
997         if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
998                 if (len > I830_BATCH_LIMIT)
999                         return -ENOSPC;
1000
1001                 cs = intel_ring_begin(rq, 6 + 2);
1002                 if (IS_ERR(cs))
1003                         return PTR_ERR(cs);
1004
1005                 /* Blit the batch (which now has all relocs applied) to the
1006                  * stable batch scratch bo area (so that the CS never
1007                  * stumbles over its tlb invalidation bug) ...
1008                  */
1009                 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
1010                 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
1011                 *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
1012                 *cs++ = cs_offset;
1013                 *cs++ = 4096;
1014                 *cs++ = offset;
1015
1016                 *cs++ = MI_FLUSH;
1017                 *cs++ = MI_NOOP;
1018                 intel_ring_advance(rq, cs);
1019
1020                 /* ... and execute it. */
1021                 offset = cs_offset;
1022         }
1023
1024         cs = intel_ring_begin(rq, 2);
1025         if (IS_ERR(cs))
1026                 return PTR_ERR(cs);
1027
1028         *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1029         *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1030                 MI_BATCH_NON_SECURE);
1031         intel_ring_advance(rq, cs);
1032
1033         return 0;
1034 }
1035
1036 static int
1037 i915_emit_bb_start(struct i915_request *rq,
1038                    u64 offset, u32 len,
1039                    unsigned int dispatch_flags)
1040 {
1041         u32 *cs;
1042
1043         cs = intel_ring_begin(rq, 2);
1044         if (IS_ERR(cs))
1045                 return PTR_ERR(cs);
1046
1047         *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1048         *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1049                 MI_BATCH_NON_SECURE);
1050         intel_ring_advance(rq, cs);
1051
1052         return 0;
1053 }
1054
1055 int intel_ring_pin(struct intel_ring *ring)
1056 {
1057         struct i915_vma *vma = ring->vma;
1058         enum i915_map_type map =
1059                 HAS_LLC(vma->vm->i915) ? I915_MAP_WB : I915_MAP_WC;
1060         unsigned int flags;
1061         void *addr;
1062         int ret;
1063
1064         GEM_BUG_ON(ring->vaddr);
1065
1066         flags = PIN_GLOBAL;
1067
1068         /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
1069         flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
1070
1071         if (vma->obj->stolen)
1072                 flags |= PIN_MAPPABLE;
1073         else
1074                 flags |= PIN_HIGH;
1075
1076         if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1077                 if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
1078                         ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1079                 else
1080                         ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
1081                 if (unlikely(ret))
1082                         return ret;
1083         }
1084
1085         ret = i915_vma_pin(vma, 0, 0, flags);
1086         if (unlikely(ret))
1087                 return ret;
1088
1089         if (i915_vma_is_map_and_fenceable(vma))
1090                 addr = (void __force *)i915_vma_pin_iomap(vma);
1091         else
1092                 addr = i915_gem_object_pin_map(vma->obj, map);
1093         if (IS_ERR(addr))
1094                 goto err;
1095
1096         vma->obj->pin_global++;
1097
1098         ring->vaddr = addr;
1099         return 0;
1100
1101 err:
1102         i915_vma_unpin(vma);
1103         return PTR_ERR(addr);
1104 }
1105
1106 void intel_ring_reset(struct intel_ring *ring, u32 tail)
1107 {
1108         GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
1109
1110         ring->tail = tail;
1111         ring->head = tail;
1112         ring->emit = tail;
1113         intel_ring_update_space(ring);
1114 }
1115
1116 void intel_ring_unpin(struct intel_ring *ring)
1117 {
1118         GEM_BUG_ON(!ring->vma);
1119         GEM_BUG_ON(!ring->vaddr);
1120
1121         /* Discard any unused bytes beyond that submitted to hw. */
1122         intel_ring_reset(ring, ring->tail);
1123
1124         if (i915_vma_is_map_and_fenceable(ring->vma))
1125                 i915_vma_unpin_iomap(ring->vma);
1126         else
1127                 i915_gem_object_unpin_map(ring->vma->obj);
1128         ring->vaddr = NULL;
1129
1130         ring->vma->obj->pin_global--;
1131         i915_vma_unpin(ring->vma);
1132 }
1133
1134 static struct i915_vma *
1135 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
1136 {
1137         struct i915_address_space *vm = &dev_priv->ggtt.vm;
1138         struct drm_i915_gem_object *obj;
1139         struct i915_vma *vma;
1140
1141         obj = i915_gem_object_create_stolen(dev_priv, size);
1142         if (!obj)
1143                 obj = i915_gem_object_create_internal(dev_priv, size);
1144         if (IS_ERR(obj))
1145                 return ERR_CAST(obj);
1146
1147         /*
1148          * Mark ring buffers as read-only from GPU side (so no stray overwrites)
1149          * if supported by the platform's GGTT.
1150          */
1151         if (vm->has_read_only)
1152                 i915_gem_object_set_readonly(obj);
1153
1154         vma = i915_vma_instance(obj, vm, NULL);
1155         if (IS_ERR(vma))
1156                 goto err;
1157
1158         return vma;
1159
1160 err:
1161         i915_gem_object_put(obj);
1162         return vma;
1163 }
1164
1165 struct intel_ring *
1166 intel_engine_create_ring(struct intel_engine_cs *engine,
1167                          struct i915_timeline *timeline,
1168                          int size)
1169 {
1170         struct intel_ring *ring;
1171         struct i915_vma *vma;
1172
1173         GEM_BUG_ON(!is_power_of_2(size));
1174         GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
1175         GEM_BUG_ON(timeline == &engine->timeline);
1176         lockdep_assert_held(&engine->i915->drm.struct_mutex);
1177
1178         ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1179         if (!ring)
1180                 return ERR_PTR(-ENOMEM);
1181
1182         INIT_LIST_HEAD(&ring->request_list);
1183         ring->timeline = i915_timeline_get(timeline);
1184
1185         ring->size = size;
1186         /* Workaround an erratum on the i830 which causes a hang if
1187          * the TAIL pointer points to within the last 2 cachelines
1188          * of the buffer.
1189          */
1190         ring->effective_size = size;
1191         if (IS_I830(engine->i915) || IS_I845G(engine->i915))
1192                 ring->effective_size -= 2 * CACHELINE_BYTES;
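        /*
         * For example, the 32 * PAGE_SIZE ring created by
         * intel_init_ring_buffer() ends up with 131072 - 128 == 130944
         * usable bytes on i830/i845.
         */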
1193
1194         intel_ring_update_space(ring);
1195
1196         vma = intel_ring_create_vma(engine->i915, size);
1197         if (IS_ERR(vma)) {
1198                 kfree(ring);
1199                 return ERR_CAST(vma);
1200         }
1201         ring->vma = vma;
1202
1203         return ring;
1204 }
1205
1206 void
1207 intel_ring_free(struct intel_ring *ring)
1208 {
1209         struct drm_i915_gem_object *obj = ring->vma->obj;
1210
1211         i915_vma_close(ring->vma);
1212         __i915_gem_object_release_unless_active(obj);
1213
1214         i915_timeline_put(ring->timeline);
1215         kfree(ring);
1216 }
1217
1218 static void intel_ring_context_destroy(struct intel_context *ce)
1219 {
1220         GEM_BUG_ON(ce->pin_count);
1221
1222         if (!ce->state)
1223                 return;
1224
1225         GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
1226         i915_gem_object_put(ce->state->obj);
1227 }
1228
1229 static int __context_pin_ppgtt(struct i915_gem_context *ctx)
1230 {
1231         struct i915_hw_ppgtt *ppgtt;
1232         int err = 0;
1233
1234         ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
1235         if (ppgtt)
1236                 err = gen6_ppgtt_pin(ppgtt);
1237
1238         return err;
1239 }
1240
1241 static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
1242 {
1243         struct i915_hw_ppgtt *ppgtt;
1244
1245         ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
1246         if (ppgtt)
1247                 gen6_ppgtt_unpin(ppgtt);
1248 }
1249
1250 static int __context_pin(struct intel_context *ce)
1251 {
1252         struct i915_vma *vma;
1253         int err;
1254
1255         vma = ce->state;
1256         if (!vma)
1257                 return 0;
1258
1259         /*
1260          * Clear this page out of any CPU caches for coherent swap-in/out.
1261          * We only want to do this on the first bind so that we do not stall
1262          * on an active context (which by nature is already on the GPU).
1263          */
1264         if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1265                 err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1266                 if (err)
1267                         return err;
1268         }
1269
1270         err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
1271         if (err)
1272                 return err;
1273
1274         /*
1275          * And mark it as a globally pinned object to let the shrinker know
1276          * it cannot reclaim the object until we release it.
1277          */
1278         vma->obj->pin_global++;
1279
1280         return 0;
1281 }
1282
1283 static void __context_unpin(struct intel_context *ce)
1284 {
1285         struct i915_vma *vma;
1286
1287         vma = ce->state;
1288         if (!vma)
1289                 return;
1290
1291         vma->obj->pin_global--;
1292         i915_vma_unpin(vma);
1293 }
1294
1295 static void intel_ring_context_unpin(struct intel_context *ce)
1296 {
1297         __context_unpin_ppgtt(ce->gem_context);
1298         __context_unpin(ce);
1299
1300         i915_gem_context_put(ce->gem_context);
1301 }
1302
1303 static struct i915_vma *
1304 alloc_context_vma(struct intel_engine_cs *engine)
1305 {
1306         struct drm_i915_private *i915 = engine->i915;
1307         struct drm_i915_gem_object *obj;
1308         struct i915_vma *vma;
1309         int err;
1310
1311         obj = i915_gem_object_create(i915, engine->context_size);
1312         if (IS_ERR(obj))
1313                 return ERR_CAST(obj);
1314
1315         if (engine->default_state) {
1316                 void *defaults, *vaddr;
1317
1318                 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
1319                 if (IS_ERR(vaddr)) {
1320                         err = PTR_ERR(vaddr);
1321                         goto err_obj;
1322                 }
1323
1324                 defaults = i915_gem_object_pin_map(engine->default_state,
1325                                                    I915_MAP_WB);
1326                 if (IS_ERR(defaults)) {
1327                         err = PTR_ERR(defaults);
1328                         goto err_map;
1329                 }
1330
1331                 memcpy(vaddr, defaults, engine->context_size);
1332
1333                 i915_gem_object_unpin_map(engine->default_state);
1334                 i915_gem_object_unpin_map(obj);
1335         }
1336
1337         /*
1338          * Try to make the context utilize L3 as well as LLC.
1339          *
1340          * On VLV we don't have L3 controls in the PTEs so we
1341          * shouldn't touch the cache level, especially as that
1342          * would make the object snooped which might have a
1343          * negative performance impact.
1344          *
1345          * Snooping is required on non-llc platforms in execlist
1346          * mode, but since all GGTT accesses use PAT entry 0 we
1347          * get snooping anyway regardless of cache_level.
1348          *
1349          * This is only applicable for Ivy Bridge devices since
1350          * later platforms don't have L3 control bits in the PTE.
1351          */
1352         if (IS_IVYBRIDGE(i915)) {
1353                 /* Ignore any error, regard it as a simple optimisation */
1354                 i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
1355         }
1356
1357         vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1358         if (IS_ERR(vma)) {
1359                 err = PTR_ERR(vma);
1360                 goto err_obj;
1361         }
1362
1363         return vma;
1364
1365 err_map:
1366         i915_gem_object_unpin_map(obj);
1367 err_obj:
1368         i915_gem_object_put(obj);
1369         return ERR_PTR(err);
1370 }
1371
1372 static struct intel_context *
1373 __ring_context_pin(struct intel_engine_cs *engine,
1374                    struct i915_gem_context *ctx,
1375                    struct intel_context *ce)
1376 {
1377         int err;
1378
1379         if (!ce->state && engine->context_size) {
1380                 struct i915_vma *vma;
1381
1382                 vma = alloc_context_vma(engine);
1383                 if (IS_ERR(vma)) {
1384                         err = PTR_ERR(vma);
1385                         goto err;
1386                 }
1387
1388                 ce->state = vma;
1389         }
1390
1391         err = __context_pin(ce);
1392         if (err)
1393                 goto err;
1394
1395         err = __context_pin_ppgtt(ce->gem_context);
1396         if (err)
1397                 goto err_unpin;
1398
1399         i915_gem_context_get(ctx);
1400
1401         /* One ringbuffer to rule them all */
1402         GEM_BUG_ON(!engine->buffer);
1403         ce->ring = engine->buffer;
1404
1405         return ce;
1406
1407 err_unpin:
1408         __context_unpin(ce);
1409 err:
1410         ce->pin_count = 0;
1411         return ERR_PTR(err);
1412 }
1413
1414 static const struct intel_context_ops ring_context_ops = {
1415         .unpin = intel_ring_context_unpin,
1416         .destroy = intel_ring_context_destroy,
1417 };
1418
1419 static struct intel_context *
1420 intel_ring_context_pin(struct intel_engine_cs *engine,
1421                        struct i915_gem_context *ctx)
1422 {
1423         struct intel_context *ce = to_intel_context(ctx, engine);
1424
1425         lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1426
1427         if (likely(ce->pin_count++))
1428                 return ce;
1429         GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
1430
1431         ce->ops = &ring_context_ops;
1432
1433         return __ring_context_pin(engine, ctx, ce);
1434 }
1435
1436 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
1437 {
1438         struct i915_timeline *timeline;
1439         struct intel_ring *ring;
1440         unsigned int size;
1441         int err;
1442
1443         intel_engine_setup_common(engine);
1444
1445         timeline = i915_timeline_create(engine->i915, engine->name);
1446         if (IS_ERR(timeline)) {
1447                 err = PTR_ERR(timeline);
1448                 goto err;
1449         }
1450
1451         ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
1452         i915_timeline_put(timeline);
1453         if (IS_ERR(ring)) {
1454                 err = PTR_ERR(ring);
1455                 goto err;
1456         }
1457
1458         err = intel_ring_pin(ring);
1459         if (err)
1460                 goto err_ring;
1461
1462         GEM_BUG_ON(engine->buffer);
1463         engine->buffer = ring;
1464
1465         size = PAGE_SIZE;
1466         if (HAS_BROKEN_CS_TLB(engine->i915))
1467                 size = I830_WA_SIZE;
1468         err = intel_engine_create_scratch(engine, size);
1469         if (err)
1470                 goto err_unpin;
1471
1472         err = intel_engine_init_common(engine);
1473         if (err)
1474                 goto err_scratch;
1475
1476         return 0;
1477
1478 err_scratch:
1479         intel_engine_cleanup_scratch(engine);
1480 err_unpin:
1481         intel_ring_unpin(ring);
1482 err_ring:
1483         intel_ring_free(ring);
1484 err:
1485         intel_engine_cleanup_common(engine);
1486         return err;
1487 }
1488
1489 void intel_engine_cleanup(struct intel_engine_cs *engine)
1490 {
1491         struct drm_i915_private *dev_priv = engine->i915;
1492
1493         WARN_ON(INTEL_GEN(dev_priv) > 2 &&
1494                 (I915_READ_MODE(engine) & MODE_IDLE) == 0);
1495
1496         intel_ring_unpin(engine->buffer);
1497         intel_ring_free(engine->buffer);
1498
1499         if (engine->cleanup)
1500                 engine->cleanup(engine);
1501
1502         intel_engine_cleanup_common(engine);
1503
1504         dev_priv->engine[engine->id] = NULL;
1505         kfree(engine);
1506 }
1507
1508 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
1509 {
1510         struct intel_engine_cs *engine;
1511         enum intel_engine_id id;
1512
1513         /* Restart from the beginning of the rings for convenience */
1514         for_each_engine(engine, dev_priv, id)
1515                 intel_ring_reset(engine->buffer, 0);
1516 }
1517
1518 static int load_pd_dir(struct i915_request *rq,
1519                        const struct i915_hw_ppgtt *ppgtt)
1520 {
1521         const struct intel_engine_cs * const engine = rq->engine;
1522         u32 *cs;
1523
1524         cs = intel_ring_begin(rq, 6);
1525         if (IS_ERR(cs))
1526                 return PTR_ERR(cs);
1527
1528         *cs++ = MI_LOAD_REGISTER_IMM(1);
1529         *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1530         *cs++ = PP_DIR_DCLV_2G;
1531
1532         *cs++ = MI_LOAD_REGISTER_IMM(1);
1533         *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1534         *cs++ = ppgtt->pd.base.ggtt_offset << 10;
1535
1536         intel_ring_advance(rq, cs);
1537
1538         return 0;
1539 }
1540
1541 static int flush_pd_dir(struct i915_request *rq)
1542 {
1543         const struct intel_engine_cs * const engine = rq->engine;
1544         u32 *cs;
1545
1546         cs = intel_ring_begin(rq, 4);
1547         if (IS_ERR(cs))
1548                 return PTR_ERR(cs);
1549
1550         /* Stall until the page table load is complete */
1551         *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1552         *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1553         *cs++ = i915_ggtt_offset(engine->scratch);
1554         *cs++ = MI_NOOP;
1555
1556         intel_ring_advance(rq, cs);
1557         return 0;
1558 }
1559
1560 static inline int mi_set_context(struct i915_request *rq, u32 flags)
1561 {
1562         struct drm_i915_private *i915 = rq->i915;
1563         struct intel_engine_cs *engine = rq->engine;
1564         enum intel_engine_id id;
1565         const int num_rings =
1566                 /* Use an extended w/a on gen7 if signalling from other rings */
1567                 (HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
1568                 INTEL_INFO(i915)->num_rings - 1 :
1569                 0;
1570         bool force_restore = false;
1571         int len;
1572         u32 *cs;
1573
1574         flags |= MI_MM_SPACE_GTT;
1575         if (IS_HASWELL(i915))
1576                 /* These flags are for resource streamer on HSW+ */
1577                 flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
1578         else
1579                 flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
1580
1581         len = 4;
1582         if (IS_GEN7(i915))
1583                 len += 2 + (num_rings ? 4*num_rings + 6 : 0);
1584         if (flags & MI_FORCE_RESTORE) {
1585                 GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
1586                 flags &= ~MI_FORCE_RESTORE;
1587                 force_restore = true;
1588                 len += 2;
1589         }
1590
1591         cs = intel_ring_begin(rq, len);
1592         if (IS_ERR(cs))
1593                 return PTR_ERR(cs);
1594
1595         /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
1596         if (IS_GEN7(i915)) {
1597                 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1598                 if (num_rings) {
1599                         struct intel_engine_cs *signaller;
1600
1601                         *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
1602                         for_each_engine(signaller, i915, id) {
1603                                 if (signaller == engine)
1604                                         continue;
1605
1606                                 *cs++ = i915_mmio_reg_offset(
1607                                            RING_PSMI_CTL(signaller->mmio_base));
1608                                 *cs++ = _MASKED_BIT_ENABLE(
1609                                                 GEN6_PSMI_SLEEP_MSG_DISABLE);
1610                         }
1611                 }
1612         }
1613
1614         if (force_restore) {
1615                 /*
1616                  * The HW doesn't handle being told to restore the current
1617                  * context very well. Quite often it likes to go off and
1618                  * sulk, especially when it is meant to be reloading PP_DIR.
1619                  * A simple fix to force the reload is to switch away from
1620                  * the current context and back again.
1621                  *
1622                  * Note that the kernel_context will contain random state
1623                  * following the INHIBIT_RESTORE. We accept this since we
1624                  * never use the kernel_context state; it is merely a
1625                  * placeholder we use to flush other contexts.
1626                  */
1627                 *cs++ = MI_SET_CONTEXT;
1628                 *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
1629                                                           engine)->state) |
1630                         MI_MM_SPACE_GTT |
1631                         MI_RESTORE_INHIBIT;
1632         }
1633
1634         *cs++ = MI_NOOP;
1635         *cs++ = MI_SET_CONTEXT;
1636         *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
1637         /*
1638          * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
1639          * WaMiSetContext_Hang:snb,ivb,vlv
1640          */
1641         *cs++ = MI_NOOP;
1642
1643         if (IS_GEN7(i915)) {
1644                 if (num_rings) {
1645                         struct intel_engine_cs *signaller;
1646                         i915_reg_t last_reg = {}; /* keep gcc quiet */
1647
1648                         *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
1649                         for_each_engine(signaller, i915, id) {
1650                                 if (signaller == engine)
1651                                         continue;
1652
1653                                 last_reg = RING_PSMI_CTL(signaller->mmio_base);
1654                                 *cs++ = i915_mmio_reg_offset(last_reg);
1655                                 *cs++ = _MASKED_BIT_DISABLE(
1656                                                 GEN6_PSMI_SLEEP_MSG_DISABLE);
1657                         }
1658
1659                         /* Insert a delay before the next switch! */
1660                         *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1661                         *cs++ = i915_mmio_reg_offset(last_reg);
1662                         *cs++ = i915_ggtt_offset(engine->scratch);
1663                         *cs++ = MI_NOOP;
1664                 }
1665                 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1666         }
1667
1668         intel_ring_advance(rq, cs);
1669
1670         return 0;
1671 }
1672
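/*
 * Replay the saved L3 remapping registers for one slice with a single
 * MI_LOAD_REGISTER_IMM. A NULL remap_info means there is nothing to
 * restore for that slice.
 */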
1673 static int remap_l3(struct i915_request *rq, int slice)
1674 {
1675         u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
1676         int i;
1677
1678         if (!remap_info)
1679                 return 0;
1680
1681         cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
1682         if (IS_ERR(cs))
1683                 return PTR_ERR(cs);
1684
1685         /*
1686          * Note: We do not worry about the concurrent register cacheline hang
1687          * here because no other code should access these registers other than
1688          * at initialization time.
1689          */
1690         *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
1691         for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
1692                 *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
1693                 *cs++ = remap_info[i];
1694         }
1695         *cs++ = MI_NOOP;
1696         intel_ring_advance(rq, cs);
1697
1698         return 0;
1699 }
1700
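/*
 * The legacy ringbuffer context switch: reload the PPGTT page directory
 * when needed, emit MI_SET_CONTEXT for engines that carry a HW context
 * image, flush and invalidate around the switch, and replay any
 * outstanding L3 slice remapping.
 */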
1701 static int switch_context(struct i915_request *rq)
1702 {
1703         struct intel_engine_cs *engine = rq->engine;
1704         struct i915_gem_context *ctx = rq->gem_context;
1705         struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
1706         unsigned int unwind_mm = 0;
1707         u32 hw_flags = 0;
1708         int ret, i;
1709
1710         lockdep_assert_held(&rq->i915->drm.struct_mutex);
1711         GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
1712
1713         if (ppgtt) {
1714                 int loops;
1715
1716                 /*
1717                  * Baytrail takes a little more convincing that it really needs
1718                  * to reload the PD between contexts. And not just a little more:
1719                  * adding further stalls after load_pd_dir (i.e. adding a long
1720                  * loop around flush_pd_dir) is not as effective as reloading
1721                  * the PD umpteen times. 32 is derived from
1722                  * experimentation (gem_exec_parallel/fds) and has no good
1723                  * explanation.
1724                  */
1725                 loops = 1;
1726                 if (engine->id == BCS && IS_VALLEYVIEW(engine->i915))
1727                         loops = 32;
1728
1729                 do {
1730                         ret = load_pd_dir(rq, ppgtt);
1731                         if (ret)
1732                                 goto err;
1733                 } while (--loops);
1734
1735                 if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
1736                         unwind_mm = intel_engine_flag(engine);
1737                         ppgtt->pd_dirty_rings &= ~unwind_mm;
1738                         hw_flags = MI_FORCE_RESTORE;
1739                 }
1740         }
1741
1742         if (rq->hw_context->state) {
1743                 GEM_BUG_ON(engine->id != RCS);
1744
1745                 /*
1746                  * The kernel contexts are treated as pure scratch and are not
1747                  * expected to retain any state (we sacrifice them during
1748                  * suspend and on resume they may be corrupted). This is ok,
1749                  * as nothing actually executes using a kernel context; they
1750                  * are purely used for flushing user contexts.
1751                  */
1752                 if (i915_gem_context_is_kernel(ctx))
1753                         hw_flags = MI_RESTORE_INHIBIT;
1754
1755                 ret = mi_set_context(rq, hw_flags);
1756                 if (ret)
1757                         goto err_mm;
1758         }
1759
1760         if (ppgtt) {
1761                 ret = engine->emit_flush(rq, EMIT_INVALIDATE);
1762                 if (ret)
1763                         goto err_mm;
1764
1765                 ret = flush_pd_dir(rq);
1766                 if (ret)
1767                         goto err_mm;
1768
1769                 /*
1770                  * Not only do we need a full barrier (post-sync write) after
1771                  * invalidating the TLBs, but we need to wait a little bit
1772                  * longer. Whether this is merely delaying us, or the
1773                  * subsequent flush is a key part of serialising with the
1774                  * post-sync op, this extra pass appears vital before a
1775                  * mm switch!
1776                  */
1777                 ret = engine->emit_flush(rq, EMIT_INVALIDATE);
1778                 if (ret)
1779                         goto err_mm;
1780
1781                 ret = engine->emit_flush(rq, EMIT_FLUSH);
1782                 if (ret)
1783                         goto err_mm;
1784         }
1785
1786         if (ctx->remap_slice) {
1787                 for (i = 0; i < MAX_L3_SLICES; i++) {
1788                         if (!(ctx->remap_slice & BIT(i)))
1789                                 continue;
1790
1791                         ret = remap_l3(rq, i);
1792                         if (ret)
1793                                 goto err_mm;
1794                 }
1795
1796                 ctx->remap_slice = 0;
1797         }
1798
1799         return 0;
1800
1801 err_mm:
1802         if (unwind_mm)
1803                 ppgtt->pd_dirty_rings |= unwind_mm;
1804 err:
1805         return ret;
1806 }
1807
1808 static int ring_request_alloc(struct i915_request *request)
1809 {
1810         int ret;
1811
1812         GEM_BUG_ON(!request->hw_context->pin_count);
1813
1814         /* Flush enough space to reduce the likelihood of waiting after
1815          * we start building the request - in which case we will just
1816          * have to repeat work.
1817          */
1818         request->reserved_space += LEGACY_REQUEST_SIZE;
1819
1820         ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
1821         if (ret)
1822                 return ret;
1823
1824         ret = switch_context(request);
1825         if (ret)
1826                 return ret;
1827
1828         request->reserved_space -= LEGACY_REQUEST_SIZE;
1829         return 0;
1830 }
1831
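/*
 * Find the oldest request on this ring whose retirement would free at
 * least @bytes of space, wait for it to complete and then retire the ring
 * up to (and including) that request.
 */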
1832 static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
1833 {
1834         struct i915_request *target;
1835         long timeout;
1836
1837         lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
1838
1839         if (intel_ring_update_space(ring) >= bytes)
1840                 return 0;
1841
1842         GEM_BUG_ON(list_empty(&ring->request_list));
1843         list_for_each_entry(target, &ring->request_list, ring_link) {
1844                 /* Would completion of this request free enough space? */
1845                 if (bytes <= __intel_ring_space(target->postfix,
1846                                                 ring->emit, ring->size))
1847                         break;
1848         }
1849
1850         if (WARN_ON(&target->ring_link == &ring->request_list))
1851                 return -ENOSPC;
1852
1853         timeout = i915_request_wait(target,
1854                                     I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
1855                                     MAX_SCHEDULE_TIMEOUT);
1856         if (timeout < 0)
1857                 return timeout;
1858
1859         i915_request_retire_upto(target);
1860
1861         intel_ring_update_space(ring);
1862         GEM_BUG_ON(ring->space < bytes);
1863         return 0;
1864 }
1865
1866 int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
1867 {
1868         GEM_BUG_ON(bytes > ring->effective_size);
1869         if (unlikely(bytes > ring->effective_size - ring->emit))
1870                 bytes += ring->size - ring->emit;
1871
1872         if (unlikely(bytes > ring->space)) {
1873                 int ret = wait_for_space(ring, bytes);
1874                 if (unlikely(ret))
1875                         return ret;
1876         }
1877
1878         GEM_BUG_ON(ring->space < bytes);
1879         return 0;
1880 }
1881
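/*
 * Reserve space in the ring for @num_dwords of commands (plus the
 * request's reserved tail space), wrapping past the end of the ring
 * and/or waiting for older requests to retire as necessary, and return a
 * pointer for the caller to write the command dwords into.
 */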
1882 u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
1883 {
1884         struct intel_ring *ring = rq->ring;
1885         const unsigned int remain_usable = ring->effective_size - ring->emit;
1886         const unsigned int bytes = num_dwords * sizeof(u32);
1887         unsigned int need_wrap = 0;
1888         unsigned int total_bytes;
1889         u32 *cs;
1890
1891         /* Packets must be qword aligned. */
1892         GEM_BUG_ON(num_dwords & 1);
1893
1894         total_bytes = bytes + rq->reserved_space;
1895         GEM_BUG_ON(total_bytes > ring->effective_size);
1896
1897         if (unlikely(total_bytes > remain_usable)) {
1898                 const int remain_actual = ring->size - ring->emit;
1899
1900                 if (bytes > remain_usable) {
1901                         /*
1902                          * Not enough space for the basic request. So need to
1903                          * flush out the remainder and then wait for
1904                          * base + reserved.
1905                          */
1906                         total_bytes += remain_actual;
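                         /*
                          * The low bit flags that the tail must first be
                          * padded with MI_NOOPs and wrapped back to the
                          * start of the ring.
                          */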
1907                         need_wrap = remain_actual | 1;
1908                 } else  {
1909                         /*
1910                          * The base request will fit but the reserved space
1911                          * falls off the end. So we don't need an immediate
1912                          * wrap and only need to effectively wait for the
1913                          * reserved size from the start of ringbuffer.
1914                          */
1915                         total_bytes = rq->reserved_space + remain_actual;
1916                 }
1917         }
1918
1919         if (unlikely(total_bytes > ring->space)) {
1920                 int ret;
1921
1922                 /*
1923                  * Space is reserved in the ringbuffer for finalising the
1924                  * request, as that cannot be allowed to fail. During request
1925                  * finalisation, reserved_space is set to 0 to stop the
1926                  * overallocation and the assumption is that then we never need
1927                  * to wait (which has the risk of failing with EINTR).
1928                  *
1929                  * See also i915_request_alloc() and i915_request_add().
1930                  */
1931                 GEM_BUG_ON(!rq->reserved_space);
1932
1933                 ret = wait_for_space(ring, total_bytes);
1934                 if (unlikely(ret))
1935                         return ERR_PTR(ret);
1936         }
1937
1938         if (unlikely(need_wrap)) {
1939                 need_wrap &= ~1;
1940                 GEM_BUG_ON(need_wrap > ring->space);
1941                 GEM_BUG_ON(ring->emit + need_wrap > ring->size);
1942                 GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
1943
1944                 /* Fill the tail with MI_NOOP */
1945                 memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
1946                 ring->space -= need_wrap;
1947                 ring->emit = 0;
1948         }
1949
1950         GEM_BUG_ON(ring->emit > ring->size - bytes);
1951         GEM_BUG_ON(ring->space < bytes);
1952         cs = ring->vaddr + ring->emit;
1953         GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
1954         ring->emit += bytes;
1955         ring->space -= bytes;
1956
1957         return cs;
1958 }
1959
1960 /* Align the ring tail to a cacheline boundary */
1961 int intel_ring_cacheline_align(struct i915_request *rq)
1962 {
1963         int num_dwords;
1964         void *cs;
1965
1966         num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
1967         if (num_dwords == 0)
1968                 return 0;
1969
1970         num_dwords = CACHELINE_DWORDS - num_dwords;
1971         GEM_BUG_ON(num_dwords & 1);
1972
1973         cs = intel_ring_begin(rq, num_dwords);
1974         if (IS_ERR(cs))
1975                 return PTR_ERR(cs);
1976
1977         memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
1978         intel_ring_advance(rq, cs);
1979
1980         GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
1981         return 0;
1982 }
1983
1984 static void gen6_bsd_submit_request(struct i915_request *request)
1985 {
1986         struct drm_i915_private *dev_priv = request->i915;
1987
1988         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1989
1990         /* Every tail move must follow the sequence below */
1991
1992         /* Disable notification that the ring is IDLE. The GT
1993          * will then assume that it is busy and bring it out of rc6.
1994          */
1995         I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
1996                       _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1997
1998         /* Clear the context id. Here be magic! */
1999         I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
2000
2001         /* Wait for the ring not to be idle, i.e. for it to wake up. */
2002         if (__intel_wait_for_register_fw(dev_priv,
2003                                          GEN6_BSD_SLEEP_PSMI_CONTROL,
2004                                          GEN6_BSD_SLEEP_INDICATOR,
2005                                          0,
2006                                          1000, 0, NULL))
2007                 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
2008
2009         /* Now that the ring is fully powered up, update the tail */
2010         i9xx_submit_request(request);
2011
2012         /* Let the ring send IDLE messages to the GT again,
2013          * and so let it sleep to conserve power when idle.
2014          */
2015         I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2016                       _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2017
2018         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2019 }
2020
2021 static int mi_flush_dw(struct i915_request *rq, u32 flags)
2022 {
2023         u32 cmd, *cs;
2024
2025         cs = intel_ring_begin(rq, 4);
2026         if (IS_ERR(cs))
2027                 return PTR_ERR(cs);
2028
2029         cmd = MI_FLUSH_DW;
2030
2031         /*
2032          * We always require a command barrier so that subsequent
2033          * commands, such as breadcrumb interrupts, are strictly ordered
2034          * wrt the contents of the write cache being flushed to memory
2035          * (and thus being coherent from the CPU).
2036          */
2037         cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2038
2039         /*
2040          * Bspec vol 1c.3 - blitter engine command streamer:
2041          * "If ENABLED, all TLBs will be invalidated once the flush
2042          * operation is complete. This bit is only valid when the
2043          * Post-Sync Operation field is a value of 1h or 3h."
2044          */
2045         cmd |= flags;
2046
2047         *cs++ = cmd;
2048         *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
2049         *cs++ = 0;
2050         *cs++ = MI_NOOP;
2051
2052         intel_ring_advance(rq, cs);
2053
2054         return 0;
2055 }
2056
2057 static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
2058 {
2059         return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
2060 }
2061
2062 static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
2063 {
2064         return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
2065 }
2066
2067 static int
2068 hsw_emit_bb_start(struct i915_request *rq,
2069                   u64 offset, u32 len,
2070                   unsigned int dispatch_flags)
2071 {
2072         u32 *cs;
2073
2074         cs = intel_ring_begin(rq, 2);
2075         if (IS_ERR(cs))
2076                 return PTR_ERR(cs);
2077
2078         *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
2079                 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
2080         /* bits 0-7 are the length on GEN6+ */
2081         *cs++ = offset;
2082         intel_ring_advance(rq, cs);
2083
2084         return 0;
2085 }
2086
2087 static int
2088 gen6_emit_bb_start(struct i915_request *rq,
2089                    u64 offset, u32 len,
2090                    unsigned int dispatch_flags)
2091 {
2092         u32 *cs;
2093
2094         cs = intel_ring_begin(rq, 2);
2095         if (IS_ERR(cs))
2096                 return PTR_ERR(cs);
2097
2098         *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
2099                 0 : MI_BATCH_NON_SECURE_I965);
2100         /* bits 0-7 are the length on GEN6+ */
2101         *cs++ = offset;
2102         intel_ring_advance(rq, cs);
2103
2104         return 0;
2105 }
2106
2107 /* Blitter support (SandyBridge+) */
2108
2109 static int gen6_ring_flush(struct i915_request *rq, u32 mode)
2110 {
2111         return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
2112 }
2113
2114 static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
2115                                        struct intel_engine_cs *engine)
2116 {
2117         int i;
2118
2119         if (!HAS_LEGACY_SEMAPHORES(dev_priv))
2120                 return;
2121
2122         GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
2123         engine->semaphore.sync_to = gen6_ring_sync_to;
2124         engine->semaphore.signal = gen6_signal;
2125
2126         /*
2127          * These legacy semaphores are only used on pre-gen8 platforms,
2128          * and there is no VCS2 ring on pre-gen8 platforms. So the
2129          * semaphore between RCS and VCS2 is initialized as
2130          * INVALID.
2131          */
2132         for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
2133                 static const struct {
2134                         u32 wait_mbox;
2135                         i915_reg_t mbox_reg;
2136                 } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
2137                         [RCS_HW] = {
2138                                 [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RV,  .mbox_reg = GEN6_VRSYNC },
2139                                 [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_RB,  .mbox_reg = GEN6_BRSYNC },
2140                                 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
2141                         },
2142                         [VCS_HW] = {
2143                                 [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VR,  .mbox_reg = GEN6_RVSYNC },
2144                                 [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VB,  .mbox_reg = GEN6_BVSYNC },
2145                                 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
2146                         },
2147                         [BCS_HW] = {
2148                                 [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BR,  .mbox_reg = GEN6_RBSYNC },
2149                                 [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_BV,  .mbox_reg = GEN6_VBSYNC },
2150                                 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
2151                         },
2152                         [VECS_HW] = {
2153                                 [RCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
2154                                 [VCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
2155                                 [BCS_HW] =  { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
2156                         },
2157                 };
2158                 u32 wait_mbox;
2159                 i915_reg_t mbox_reg;
2160
2161                 if (i == engine->hw_id) {
2162                         wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
2163                         mbox_reg = GEN6_NOSYNC;
2164                 } else {
2165                         wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
2166                         mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
2167                 }
2168
2169                 engine->semaphore.mbox.wait[i] = wait_mbox;
2170                 engine->semaphore.mbox.signal[i] = mbox_reg;
2171         }
2172 }
2173
2174 static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
2175                                 struct intel_engine_cs *engine)
2176 {
2177         if (INTEL_GEN(dev_priv) >= 6) {
2178                 engine->irq_enable = gen6_irq_enable;
2179                 engine->irq_disable = gen6_irq_disable;
2180                 engine->irq_seqno_barrier = gen6_seqno_barrier;
2181         } else if (INTEL_GEN(dev_priv) >= 5) {
2182                 engine->irq_enable = gen5_irq_enable;
2183                 engine->irq_disable = gen5_irq_disable;
2184                 engine->irq_seqno_barrier = gen5_seqno_barrier;
2185         } else if (INTEL_GEN(dev_priv) >= 3) {
2186                 engine->irq_enable = i9xx_irq_enable;
2187                 engine->irq_disable = i9xx_irq_disable;
2188         } else {
2189                 engine->irq_enable = i8xx_irq_enable;
2190                 engine->irq_disable = i8xx_irq_disable;
2191         }
2192 }
2193
2194 static void i9xx_set_default_submission(struct intel_engine_cs *engine)
2195 {
2196         engine->submit_request = i9xx_submit_request;
2197         engine->cancel_requests = cancel_requests;
2198
2199         engine->park = NULL;
2200         engine->unpark = NULL;
2201 }
2202
2203 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
2204 {
2205         i9xx_set_default_submission(engine);
2206         engine->submit_request = gen6_bsd_submit_request;
2207 }
2208
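/*
 * Fill in the engine vfuncs shared by all legacy (non-execlists) engines;
 * the per-engine init functions below override the flush, dispatch and
 * irq hooks as needed for each generation.
 */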
2209 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
2210                                       struct intel_engine_cs *engine)
2211 {
2212         /* gen8+ are only supported with execlists */
2213         GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);
2214
2215         intel_ring_init_irq(dev_priv, engine);
2216         intel_ring_init_semaphores(dev_priv, engine);
2217
2218         engine->init_hw = init_ring_common;
2219         engine->reset.prepare = reset_prepare;
2220         engine->reset.reset = reset_ring;
2221         engine->reset.finish = reset_finish;
2222
2223         engine->context_pin = intel_ring_context_pin;
2224         engine->request_alloc = ring_request_alloc;
2225
2226         engine->emit_breadcrumb = i9xx_emit_breadcrumb;
2227         engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
2228         if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
2229                 int num_rings;
2230
2231                 engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
2232
2233                 num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
2234                 engine->emit_breadcrumb_sz += num_rings * 3;
2235                 if (num_rings & 1)
2236                         engine->emit_breadcrumb_sz++;
2237         }
2238
2239         engine->set_default_submission = i9xx_set_default_submission;
2240
2241         if (INTEL_GEN(dev_priv) >= 6)
2242                 engine->emit_bb_start = gen6_emit_bb_start;
2243         else if (INTEL_GEN(dev_priv) >= 4)
2244                 engine->emit_bb_start = i965_emit_bb_start;
2245         else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
2246                 engine->emit_bb_start = i830_emit_bb_start;
2247         else
2248                 engine->emit_bb_start = i915_emit_bb_start;
2249 }
2250
2251 int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
2252 {
2253         struct drm_i915_private *dev_priv = engine->i915;
2254         int ret;
2255
2256         intel_ring_default_vfuncs(dev_priv, engine);
2257
2258         if (HAS_L3_DPF(dev_priv))
2259                 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2260
2261         engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2262
2263         if (INTEL_GEN(dev_priv) >= 6) {
2264                 engine->init_context = intel_rcs_ctx_init;
2265                 engine->emit_flush = gen7_render_ring_flush;
2266                 if (IS_GEN6(dev_priv))
2267                         engine->emit_flush = gen6_render_ring_flush;
2268         } else if (IS_GEN5(dev_priv)) {
2269                 engine->emit_flush = gen4_render_ring_flush;
2270         } else {
2271                 if (INTEL_GEN(dev_priv) < 4)
2272                         engine->emit_flush = gen2_render_ring_flush;
2273                 else
2274                         engine->emit_flush = gen4_render_ring_flush;
2275                 engine->irq_enable_mask = I915_USER_INTERRUPT;
2276         }
2277
2278         if (IS_HASWELL(dev_priv))
2279                 engine->emit_bb_start = hsw_emit_bb_start;
2280
2281         engine->init_hw = init_render_ring;
2282
2283         ret = intel_init_ring_buffer(engine);
2284         if (ret)
2285                 return ret;
2286
2287         return 0;
2288 }
2289
2290 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
2291 {
2292         struct drm_i915_private *dev_priv = engine->i915;
2293
2294         intel_ring_default_vfuncs(dev_priv, engine);
2295
2296         if (INTEL_GEN(dev_priv) >= 6) {
2297                 /* gen6 bsd needs a special wa for tail updates */
2298                 if (IS_GEN6(dev_priv))
2299                         engine->set_default_submission = gen6_bsd_set_default_submission;
2300                 engine->emit_flush = gen6_bsd_ring_flush;
2301                 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2302         } else {
2303                 engine->emit_flush = bsd_ring_flush;
2304                 if (IS_GEN5(dev_priv))
2305                         engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2306                 else
2307                         engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2308         }
2309
2310         return intel_init_ring_buffer(engine);
2311 }
2312
2313 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
2314 {
2315         struct drm_i915_private *dev_priv = engine->i915;
2316
2317         intel_ring_default_vfuncs(dev_priv, engine);
2318
2319         engine->emit_flush = gen6_ring_flush;
2320         engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2321
2322         return intel_init_ring_buffer(engine);
2323 }
2324
2325 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
2326 {
2327         struct drm_i915_private *dev_priv = engine->i915;
2328
2329         intel_ring_default_vfuncs(dev_priv, engine);
2330
2331         engine->emit_flush = gen6_ring_flush;
2332         engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2333         engine->irq_enable = hsw_vebox_irq_enable;
2334         engine->irq_disable = hsw_vebox_irq_disable;
2335
2336         return intel_init_ring_buffer(engine);
2337 }