2 * Copyright © 2008-2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
26 * Xiang Hai hao <haihao.xiang@intel.com>
30 #include <linux/log2.h>
33 #include <drm/i915_drm.h>
36 #include "i915_gem_render_state.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39 #include "intel_workarounds.h"
41 /* Rough estimate of the typical request size, performing a flush,
42 * set-context and then emitting the batch.
44 #define LEGACY_REQUEST_SIZE 200
46 static unsigned int __intel_ring_space(unsigned int head,
51 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
52 * same cacheline, the Head Pointer must not be greater than the Tail
55 GEM_BUG_ON(!is_power_of_2(size));
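/*
 * Worked example (illustrative values only): for a 4096 byte ring with
 * head == 0 and tail == 64, the expression below yields
 * (0 - 64 - 64) & 4095 == 3968, i.e. the free gap minus one cacheline,
 * so that head and tail never end up in the same cacheline (per the
 * restriction quoted above).
 */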
56 return (head - tail - CACHELINE_BYTES) & (size - 1);
59 unsigned int intel_ring_update_space(struct intel_ring *ring)
63 space = __intel_ring_space(ring->head, ring->emit, ring->size);
70 gen2_render_ring_flush(struct i915_request *rq, u32 mode)
76 if (mode & EMIT_INVALIDATE)
79 cs = intel_ring_begin(rq, 2);
85 intel_ring_advance(rq, cs);
91 gen4_render_ring_flush(struct i915_request *rq, u32 mode)
99 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
100 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
101 * also flushed at 2d versus 3d pipeline switches.
105 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
106 * MI_READ_FLUSH is set, and is always flushed on 965.
108 * I915_GEM_DOMAIN_COMMAND may not exist?
110 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
111 * invalidated when MI_EXE_FLUSH is set.
113 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
114 * invalidated with every MI_FLUSH.
118 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
119 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
120 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
121 * are flushed at any MI_FLUSH.
125 if (mode & EMIT_INVALIDATE) {
127 if (IS_G4X(rq->i915) || IS_GEN5(rq->i915))
128 cmd |= MI_INVALIDATE_ISP;
132 if (mode & EMIT_INVALIDATE)
135 cs = intel_ring_begin(rq, i);
142 * A random delay to let the CS invalidate take effect? Without this
143 * delay, the GPU relocation path fails as the CS does not see
144 * the updated contents. Just as important, if we apply the flushes
145 * to the EMIT_FLUSH branch (i.e. immediately after the relocation
146 * write and before the invalidate on the next batch), the relocations
147 * still fail. This implies that there is a delay following invalidation
148 * that is required to reset the caches as opposed to a delay to
149 * ensure the memory is written.
151 if (mode & EMIT_INVALIDATE) {
152 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
153 *cs++ = i915_ggtt_offset(rq->engine->scratch) |
154 PIPE_CONTROL_GLOBAL_GTT;
158 for (i = 0; i < 12; i++)
161 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
162 *cs++ = i915_ggtt_offset(rq->engine->scratch) |
163 PIPE_CONTROL_GLOBAL_GTT;
170 intel_ring_advance(rq, cs);
176 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
177 * implementing two workarounds on gen6. From section 1.4.7.1
178 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
180 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
181 * produced by non-pipelined state commands), software needs to first
182 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
185 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
186 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
188 * And the workaround for these two requires this workaround first:
190 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
191 * BEFORE the pipe-control with a post-sync op and no write-cache
194 * And this last workaround is tricky because of the requirements on
195 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
198 * "1 of the following must also be set:
199 * - Render Target Cache Flush Enable ([12] of DW1)
200 * - Depth Cache Flush Enable ([0] of DW1)
201 * - Stall at Pixel Scoreboard ([1] of DW1)
202 * - Depth Stall ([13] of DW1)
203 * - Post-Sync Operation ([13] of DW1)
204 * - Notify Enable ([8] of DW1)"
206 * The cache flushes require the workaround flush that triggered this
207 * one, so we can't use it. Depth stall would trigger the same.
208 * Post-sync nonzero is what triggered this second workaround, so we
209 * can't use that one either. Notify enable is IRQs, which aren't
210 * really our business. That leaves only stall at scoreboard.
213 intel_emit_post_sync_nonzero_flush(struct i915_request *rq)
216 i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
219 cs = intel_ring_begin(rq, 6);
223 *cs++ = GFX_OP_PIPE_CONTROL(5);
224 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
225 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
226 *cs++ = 0; /* low dword */
227 *cs++ = 0; /* high dword */
229 intel_ring_advance(rq, cs);
231 cs = intel_ring_begin(rq, 6);
235 *cs++ = GFX_OP_PIPE_CONTROL(5);
236 *cs++ = PIPE_CONTROL_QW_WRITE;
237 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
241 intel_ring_advance(rq, cs);
247 gen6_render_ring_flush(struct i915_request *rq, u32 mode)
250 i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
254 /* Force SNB workarounds for PIPE_CONTROL flushes */
255 ret = intel_emit_post_sync_nonzero_flush(rq);
259 /* Just flush everything. Experiments have shown that reducing the
260 * number of bits based on the write domains has little performance
263 if (mode & EMIT_FLUSH) {
264 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
265 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
267 * Ensure that any following seqno writes only happen
268 * when the render cache is indeed flushed.
270 flags |= PIPE_CONTROL_CS_STALL;
272 if (mode & EMIT_INVALIDATE) {
273 flags |= PIPE_CONTROL_TLB_INVALIDATE;
274 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
275 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
276 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
277 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
278 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
280 * TLB invalidate requires a post-sync write.
282 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
285 cs = intel_ring_begin(rq, 4);
289 *cs++ = GFX_OP_PIPE_CONTROL(4);
291 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
293 intel_ring_advance(rq, cs);
299 gen7_render_ring_cs_stall_wa(struct i915_request *rq)
303 cs = intel_ring_begin(rq, 4);
307 *cs++ = GFX_OP_PIPE_CONTROL(4);
308 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
311 intel_ring_advance(rq, cs);
317 gen7_render_ring_flush(struct i915_request *rq, u32 mode)
320 i915_ggtt_offset(rq->engine->scratch) + 2 * CACHELINE_BYTES;
324 * Ensure that any following seqno writes only happen when the render
325 * cache is indeed flushed.
327 * Workaround: 4th PIPE_CONTROL command (except the ones with only
328 * read-cache invalidate bits set) must have the CS_STALL bit set. We
329 * don't try to be clever and just set it unconditionally.
331 flags |= PIPE_CONTROL_CS_STALL;
333 /* Just flush everything. Experiments have shown that reducing the
334 * number of bits based on the write domains has little performance
337 if (mode & EMIT_FLUSH) {
338 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
339 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
340 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
341 flags |= PIPE_CONTROL_FLUSH_ENABLE;
343 if (mode & EMIT_INVALIDATE) {
344 flags |= PIPE_CONTROL_TLB_INVALIDATE;
345 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
346 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
347 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
348 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
349 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
350 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
352 * TLB invalidate requires a post-sync write.
354 flags |= PIPE_CONTROL_QW_WRITE;
355 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
357 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
359 /* Workaround: we must issue a pipe_control with CS-stall bit
360 * set before a pipe_control command that has the state cache
361 * invalidate bit set. */
362 gen7_render_ring_cs_stall_wa(rq);
365 cs = intel_ring_begin(rq, 4);
369 *cs++ = GFX_OP_PIPE_CONTROL(4);
371 *cs++ = scratch_addr;
373 intel_ring_advance(rq, cs);
378 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
380 struct drm_i915_private *dev_priv = engine->i915;
381 struct page *page = virt_to_page(engine->status_page.page_addr);
382 phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
385 addr = lower_32_bits(phys);
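/*
 * On gen4+ the shift below folds bits [35:32] of the physical address
 * into bits [7:4] of the value written to HWS_PGA, presumably so that a
 * status page above 4GiB can still be addressed (inferred from the
 * arithmetic rather than stated in this file).
 */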
386 if (INTEL_GEN(dev_priv) >= 4)
387 addr |= (phys >> 28) & 0xf0;
389 I915_WRITE(HWS_PGA, addr);
392 static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
394 struct drm_i915_private *dev_priv = engine->i915;
397 /* The ring status page addresses are no longer next to the rest of
398 * the ring registers as of gen7.
400 if (IS_GEN7(dev_priv)) {
401 switch (engine->id) {
403 * No more rings exist on Gen7. Default case is only to shut up
404 * gcc switch check warning.
407 GEM_BUG_ON(engine->id);
409 mmio = RENDER_HWS_PGA_GEN7;
412 mmio = BLT_HWS_PGA_GEN7;
415 mmio = BSD_HWS_PGA_GEN7;
418 mmio = VEBOX_HWS_PGA_GEN7;
421 } else if (IS_GEN6(dev_priv)) {
422 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
424 mmio = RING_HWS_PGA(engine->mmio_base);
427 if (INTEL_GEN(dev_priv) >= 6) {
431 * Keep the render interrupt unmasked as this papers over
432 * lost interrupts following a reset.
434 if (engine->id == RCS)
437 I915_WRITE(RING_HWSTAM(engine->mmio_base), mask);
440 I915_WRITE(mmio, engine->status_page.ggtt_offset);
443 /* Flush the TLB for this page */
444 if (IS_GEN(dev_priv, 6, 7)) {
445 i915_reg_t reg = RING_INSTPM(engine->mmio_base);
447 /* ring should be idle before issuing a sync flush */
448 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
451 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
453 if (intel_wait_for_register(dev_priv,
454 reg, INSTPM_SYNC_FLUSH, 0,
456 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
461 static bool stop_ring(struct intel_engine_cs *engine)
463 struct drm_i915_private *dev_priv = engine->i915;
465 if (INTEL_GEN(dev_priv) > 2) {
466 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
467 if (intel_wait_for_register(dev_priv,
468 RING_MI_MODE(engine->mmio_base),
472 DRM_ERROR("%s : timed out trying to stop ring\n",
474 /* Sometimes we observe that the idle flag is not
475 * set even though the ring is empty. So double
476 * check before giving up.
478 if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
483 I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));
485 I915_WRITE_HEAD(engine, 0);
486 I915_WRITE_TAIL(engine, 0);
488 /* The ring must be empty before it is disabled */
489 I915_WRITE_CTL(engine, 0);
491 return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
494 static int init_ring_common(struct intel_engine_cs *engine)
496 struct drm_i915_private *dev_priv = engine->i915;
497 struct intel_ring *ring = engine->buffer;
500 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
502 if (!stop_ring(engine)) {
503 /* G45 ring initialization often fails to reset head to zero */
504 DRM_DEBUG_DRIVER("%s head not reset to zero "
505 "ctl %08x head %08x tail %08x start %08x\n",
507 I915_READ_CTL(engine),
508 I915_READ_HEAD(engine),
509 I915_READ_TAIL(engine),
510 I915_READ_START(engine));
512 if (!stop_ring(engine)) {
513 DRM_ERROR("failed to set %s head to zero "
514 "ctl %08x head %08x tail %08x start %08x\n",
516 I915_READ_CTL(engine),
517 I915_READ_HEAD(engine),
518 I915_READ_TAIL(engine),
519 I915_READ_START(engine));
525 if (HWS_NEEDS_PHYSICAL(dev_priv))
526 ring_setup_phys_status_page(engine);
528 intel_ring_setup_status_page(engine);
530 intel_engine_reset_breadcrumbs(engine);
532 /* Enforce ordering by reading HEAD register back */
533 I915_READ_HEAD(engine);
535 /* Initialize the ring. This must happen _after_ we've cleared the ring
536 * registers with the above sequence (the readback of the HEAD registers
537 * also enforces ordering), otherwise the hw might lose the new ring
538 * register values. */
539 I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
541 /* WaClearRingBufHeadRegAtInit:ctg,elk */
542 if (I915_READ_HEAD(engine))
543 DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
544 engine->name, I915_READ_HEAD(engine));
546 /* Check that the ring offsets point within the ring! */
547 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
548 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
550 intel_ring_update_space(ring);
551 I915_WRITE_HEAD(engine, ring->head);
552 I915_WRITE_TAIL(engine, ring->tail);
553 (void)I915_READ_TAIL(engine);
555 I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
557 /* If the head is still not zero, the ring is dead */
558 if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
559 RING_VALID, RING_VALID,
561 DRM_ERROR("%s initialization failed "
562 "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
564 I915_READ_CTL(engine),
565 I915_READ_CTL(engine) & RING_VALID,
566 I915_READ_HEAD(engine), ring->head,
567 I915_READ_TAIL(engine), ring->tail,
568 I915_READ_START(engine),
569 i915_ggtt_offset(ring->vma));
574 if (INTEL_GEN(dev_priv) > 2)
575 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
577 /* Papering over lost _interrupts_ immediately following the restart */
578 intel_engine_wakeup(engine);
580 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
585 static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
587 intel_engine_stop_cs(engine);
589 if (engine->irq_seqno_barrier)
590 engine->irq_seqno_barrier(engine);
592 return i915_gem_find_active_request(engine);
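/*
 * skip_request() below overwrites the guilty request's payload with
 * MI_NOOPs so that, on restart, the engine runs straight through to the
 * breadcrumb at rq->postfix. When the postfix lies before the head the
 * fill wraps: first from the head to the end of the ring, then from the
 * start of the ring up to the postfix.
 */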
595 static void skip_request(struct i915_request *rq)
597 void *vaddr = rq->ring->vaddr;
601 if (rq->postfix < head) {
602 memset32(vaddr + head, MI_NOOP,
603 (rq->ring->size - head) / sizeof(u32));
606 memset32(vaddr + head, MI_NOOP, (rq->postfix - head) / sizeof(u32));
609 static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
611 GEM_TRACE("%s request global=%d, current=%d\n",
612 engine->name, rq ? rq->global_seqno : 0,
613 intel_engine_get_seqno(engine));
616 * Try to restore the logical GPU state to match the continuation
617 * of the request queue. If we skip the context/PD restore, then
618 * the next request may try to execute assuming that its context
619 * is valid and loaded on the GPU and so may try to access invalid
620 * memory, prompting repeated GPU hangs.
622 * If the request was guilty, we still restore the logical state
623 * in case the next request requires it (e.g. the aliasing ppgtt),
624 * but skip over the hung batch.
626 * If the request was innocent, we try to replay the request with
627 * the restored context.
630 /* If the rq hung, jump to its breadcrumb and skip the batch */
631 rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
632 if (rq->fence.error == -EIO)
637 static void reset_finish(struct intel_engine_cs *engine)
641 static int intel_rcs_ctx_init(struct i915_request *rq)
645 ret = intel_ctx_workarounds_emit(rq);
649 ret = i915_gem_render_state_emit(rq);
656 static int init_render_ring(struct intel_engine_cs *engine)
658 struct drm_i915_private *dev_priv = engine->i915;
659 int ret = init_ring_common(engine);
663 intel_whitelist_workarounds_apply(engine);
665 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
666 if (IS_GEN(dev_priv, 4, 6))
667 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
669 /* We need to disable the AsyncFlip performance optimisations in order
670 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
671 * programmed to '1' on all products.
673 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
675 if (IS_GEN(dev_priv, 6, 7))
676 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
678 /* Required for the hardware to program scanline values for waiting */
679 /* WaEnableFlushTlbInvalidationMode:snb */
680 if (IS_GEN6(dev_priv))
682 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
684 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
685 if (IS_GEN7(dev_priv))
686 I915_WRITE(GFX_MODE_GEN7,
687 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
688 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
690 if (IS_GEN6(dev_priv)) {
691 /* From the Sandybridge PRM, volume 1 part 3, page 24:
692 * "If this bit is set, STCunit will have LRA as replacement
693 * policy. [...] This bit must be reset. LRA replacement
694 * policy is not supported."
696 I915_WRITE(CACHE_MODE_0,
697 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
700 if (IS_GEN(dev_priv, 6, 7))
701 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
703 if (INTEL_GEN(dev_priv) >= 6)
704 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
709 static u32 *gen6_signal(struct i915_request *rq, u32 *cs)
711 struct drm_i915_private *dev_priv = rq->i915;
712 struct intel_engine_cs *engine;
713 enum intel_engine_id id;
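/*
 * Poke our seqno into every other engine's signalling mailbox with an
 * MI_LOAD_REGISTER_IMM, so that a waiter using MI_SEMAPHORE_MBOX on
 * that engine (see gen6_ring_sync_to()) has a value to compare against.
 */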
716 for_each_engine(engine, dev_priv, id) {
719 if (!(BIT(engine->hw_id) & GEN6_SEMAPHORES_MASK))
722 mbox_reg = rq->engine->semaphore.mbox.signal[engine->hw_id];
723 if (i915_mmio_reg_valid(mbox_reg)) {
724 *cs++ = MI_LOAD_REGISTER_IMM(1);
725 *cs++ = i915_mmio_reg_offset(mbox_reg);
726 *cs++ = rq->global_seqno;
736 static void cancel_requests(struct intel_engine_cs *engine)
738 struct i915_request *request;
741 spin_lock_irqsave(&engine->timeline.lock, flags);
743 /* Mark all submitted requests as skipped. */
744 list_for_each_entry(request, &engine->timeline.requests, link) {
745 GEM_BUG_ON(!request->global_seqno);
746 if (!i915_request_completed(request))
747 dma_fence_set_error(&request->fence, -EIO);
749 /* Remaining _unready_ requests will be nop'ed when submitted */
751 spin_unlock_irqrestore(&engine->timeline.lock, flags);
754 static void i9xx_submit_request(struct i915_request *request)
756 struct drm_i915_private *dev_priv = request->i915;
758 i915_request_submit(request);
760 I915_WRITE_TAIL(request->engine,
761 intel_ring_set_tail(request->ring, request->tail));
764 static void i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
766 *cs++ = MI_STORE_DWORD_INDEX;
767 *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
768 *cs++ = rq->global_seqno;
769 *cs++ = MI_USER_INTERRUPT;
771 rq->tail = intel_ring_offset(rq, cs);
772 assert_ring_tail_valid(rq->ring, rq->tail);
775 static const int i9xx_emit_breadcrumb_sz = 4;
777 static void gen6_sema_emit_breadcrumb(struct i915_request *rq, u32 *cs)
779 return i9xx_emit_breadcrumb(rq, rq->engine->semaphore.signal(rq, cs));
783 gen6_ring_sync_to(struct i915_request *rq, struct i915_request *signal)
785 u32 dw1 = MI_SEMAPHORE_MBOX |
786 MI_SEMAPHORE_COMPARE |
787 MI_SEMAPHORE_REGISTER;
788 u32 wait_mbox = signal->engine->semaphore.mbox.wait[rq->engine->hw_id];
791 WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
793 cs = intel_ring_begin(rq, 4);
797 *cs++ = dw1 | wait_mbox;
798 /* Throughout all of the GEM code, seqno passed implies our current
799 * seqno is >= the last seqno executed. However for hardware the
800 * comparison is strictly greater than.
802 *cs++ = signal->global_seqno - 1;
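/* e.g. to wait for seqno 20 we program 19, so that the hardware's
 * strictly-greater-than test passes exactly when the mailbox reaches 20.
 */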
805 intel_ring_advance(rq, cs);
811 gen5_seqno_barrier(struct intel_engine_cs *engine)
813 /* MI_STORE writes are internally buffered by the GPU and not flushed
814 * either by MI_FLUSH or SyncFlush or any other combination of
817 * "Only the submission of the store operation is guaranteed.
818 * The write result will be complete (coherent) some time later
819 * (this is practically a finite period but there is no guaranteed
822 * Empirically, we observe that we need a delay of at least 75us to
823 * be sure that the seqno write is visible to the CPU.
825 usleep_range(125, 250);
829 gen6_seqno_barrier(struct intel_engine_cs *engine)
831 struct drm_i915_private *dev_priv = engine->i915;
833 /* Workaround to force correct ordering between irq and seqno writes on
834 * ivb (and maybe also on snb) by reading from a CS register (like
835 * ACTHD) before reading the status page.
837 * Note that this effectively stalls the read by the time it takes to
838 * do a memory transaction, which more or less ensures that the write
839 * from the GPU has sufficient time to invalidate the CPU cacheline.
840 * Alternatively we could delay the interrupt from the CS ring to give
841 * the write time to land, but that would incur a delay after every
842 * batch i.e. much more frequent than a delay when waiting for the
843 * interrupt (with the same net latency).
845 * Also note that to prevent whole machine hangs on gen7, we have to
846 * take the spinlock to guard against concurrent cacheline access.
848 spin_lock_irq(&dev_priv->uncore.lock);
849 POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
850 spin_unlock_irq(&dev_priv->uncore.lock);
854 gen5_irq_enable(struct intel_engine_cs *engine)
856 gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
860 gen5_irq_disable(struct intel_engine_cs *engine)
862 gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
866 i9xx_irq_enable(struct intel_engine_cs *engine)
868 struct drm_i915_private *dev_priv = engine->i915;
870 dev_priv->irq_mask &= ~engine->irq_enable_mask;
871 I915_WRITE(IMR, dev_priv->irq_mask);
872 POSTING_READ_FW(RING_IMR(engine->mmio_base));
876 i9xx_irq_disable(struct intel_engine_cs *engine)
878 struct drm_i915_private *dev_priv = engine->i915;
880 dev_priv->irq_mask |= engine->irq_enable_mask;
881 I915_WRITE(IMR, dev_priv->irq_mask);
885 i8xx_irq_enable(struct intel_engine_cs *engine)
887 struct drm_i915_private *dev_priv = engine->i915;
889 dev_priv->irq_mask &= ~engine->irq_enable_mask;
890 I915_WRITE16(IMR, dev_priv->irq_mask);
891 POSTING_READ16(RING_IMR(engine->mmio_base));
895 i8xx_irq_disable(struct intel_engine_cs *engine)
897 struct drm_i915_private *dev_priv = engine->i915;
899 dev_priv->irq_mask |= engine->irq_enable_mask;
900 I915_WRITE16(IMR, dev_priv->irq_mask);
904 bsd_ring_flush(struct i915_request *rq, u32 mode)
908 cs = intel_ring_begin(rq, 2);
914 intel_ring_advance(rq, cs);
919 gen6_irq_enable(struct intel_engine_cs *engine)
921 struct drm_i915_private *dev_priv = engine->i915;
923 I915_WRITE_IMR(engine,
924 ~(engine->irq_enable_mask |
925 engine->irq_keep_mask));
926 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
930 gen6_irq_disable(struct intel_engine_cs *engine)
932 struct drm_i915_private *dev_priv = engine->i915;
934 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
935 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
939 hsw_vebox_irq_enable(struct intel_engine_cs *engine)
941 struct drm_i915_private *dev_priv = engine->i915;
943 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
944 gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
948 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
950 struct drm_i915_private *dev_priv = engine->i915;
952 I915_WRITE_IMR(engine, ~0);
953 gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
957 i965_emit_bb_start(struct i915_request *rq,
958 u64 offset, u32 length,
959 unsigned int dispatch_flags)
963 cs = intel_ring_begin(rq, 2);
967 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
968 I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
970 intel_ring_advance(rq, cs);
975 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
976 #define I830_BATCH_LIMIT (256*1024)
977 #define I830_TLB_ENTRIES (2)
978 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
980 i830_emit_bb_start(struct i915_request *rq,
982 unsigned int dispatch_flags)
984 u32 *cs, cs_offset = i915_ggtt_offset(rq->engine->scratch);
986 cs = intel_ring_begin(rq, 6);
990 /* Evict the invalid PTE TLBs */
991 *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
992 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
993 *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
997 intel_ring_advance(rq, cs);
999 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
1000 if (len > I830_BATCH_LIMIT)
1003 cs = intel_ring_begin(rq, 6 + 2);
1007 /* Blit the batch (which now has all relocs applied) to the
1008 * stable batch scratch bo area (so that the CS never
1009 * stumbles over its tlb invalidation bug) ...
1011 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
1012 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
1013 *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
1020 intel_ring_advance(rq, cs);
1022 /* ... and execute it. */
1026 cs = intel_ring_begin(rq, 2);
1030 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1031 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1032 MI_BATCH_NON_SECURE);
1033 intel_ring_advance(rq, cs);
1039 i915_emit_bb_start(struct i915_request *rq,
1040 u64 offset, u32 len,
1041 unsigned int dispatch_flags)
1045 cs = intel_ring_begin(rq, 2);
1049 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1050 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1051 MI_BATCH_NON_SECURE);
1052 intel_ring_advance(rq, cs);
1057 int intel_ring_pin(struct intel_ring *ring)
1059 struct i915_vma *vma = ring->vma;
1060 enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
1065 GEM_BUG_ON(ring->vaddr);
1069 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
1070 flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
1072 if (vma->obj->stolen)
1073 flags |= PIN_MAPPABLE;
1077 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1078 if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
1079 ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1081 ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
1086 ret = i915_vma_pin(vma, 0, 0, flags);
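/*
 * Map the ring for the CPU: through the GGTT aperture with
 * i915_vma_pin_iomap() when the vma is map-and-fenceable, otherwise by
 * mapping the backing pages directly with the coherent map type chosen
 * above.
 */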
1090 if (i915_vma_is_map_and_fenceable(vma))
1091 addr = (void __force *)i915_vma_pin_iomap(vma);
1093 addr = i915_gem_object_pin_map(vma->obj, map);
1097 vma->obj->pin_global++;
1103 i915_vma_unpin(vma);
1104 return PTR_ERR(addr);
1107 void intel_ring_reset(struct intel_ring *ring, u32 tail)
1109 GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
1114 intel_ring_update_space(ring);
1117 void intel_ring_unpin(struct intel_ring *ring)
1119 GEM_BUG_ON(!ring->vma);
1120 GEM_BUG_ON(!ring->vaddr);
1122 /* Discard any unused bytes beyond that submitted to hw. */
1123 intel_ring_reset(ring, ring->tail);
1125 if (i915_vma_is_map_and_fenceable(ring->vma))
1126 i915_vma_unpin_iomap(ring->vma);
1128 i915_gem_object_unpin_map(ring->vma->obj);
1131 ring->vma->obj->pin_global--;
1132 i915_vma_unpin(ring->vma);
1135 static struct i915_vma *
1136 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
1138 struct i915_address_space *vm = &dev_priv->ggtt.vm;
1139 struct drm_i915_gem_object *obj;
1140 struct i915_vma *vma;
1142 obj = i915_gem_object_create_stolen(dev_priv, size);
1144 obj = i915_gem_object_create_internal(dev_priv, size);
1146 return ERR_CAST(obj);
1149 * Mark ring buffers as read-only from GPU side (so no stray overwrites)
1150 * if supported by the platform's GGTT.
1152 if (vm->has_read_only)
1153 i915_gem_object_set_readonly(obj);
1155 vma = i915_vma_instance(obj, vm, NULL);
1162 i915_gem_object_put(obj);
1167 intel_engine_create_ring(struct intel_engine_cs *engine,
1168 struct i915_timeline *timeline,
1171 struct intel_ring *ring;
1172 struct i915_vma *vma;
1174 GEM_BUG_ON(!is_power_of_2(size));
1175 GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
1176 GEM_BUG_ON(timeline == &engine->timeline);
1177 lockdep_assert_held(&engine->i915->drm.struct_mutex);
1179 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1181 return ERR_PTR(-ENOMEM);
1183 INIT_LIST_HEAD(&ring->request_list);
1184 ring->timeline = i915_timeline_get(timeline);
1187 /* Workaround an erratum on the i830 which causes a hang if
1188 * the TAIL pointer points to within the last 2 cachelines
1191 ring->effective_size = size;
1192 if (IS_I830(engine->i915) || IS_I845G(engine->i915))
1193 ring->effective_size -= 2 * CACHELINE_BYTES;
1195 intel_ring_update_space(ring);
1197 vma = intel_ring_create_vma(engine->i915, size);
1200 return ERR_CAST(vma);
1208 intel_ring_free(struct intel_ring *ring)
1210 struct drm_i915_gem_object *obj = ring->vma->obj;
1212 i915_vma_close(ring->vma);
1213 __i915_gem_object_release_unless_active(obj);
1215 i915_timeline_put(ring->timeline);
1219 static void intel_ring_context_destroy(struct intel_context *ce)
1221 GEM_BUG_ON(ce->pin_count);
1226 GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
1227 i915_gem_object_put(ce->state->obj);
1230 static int __context_pin_ppgtt(struct i915_gem_context *ctx)
1232 struct i915_hw_ppgtt *ppgtt;
1235 ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
1237 err = gen6_ppgtt_pin(ppgtt);
1242 static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
1244 struct i915_hw_ppgtt *ppgtt;
1246 ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
1248 gen6_ppgtt_unpin(ppgtt);
1251 static int __context_pin(struct intel_context *ce)
1253 struct i915_vma *vma;
1261 * Clear this page out of any CPU caches for coherent swap-in/out.
1262 * We only want to do this on the first bind so that we do not stall
1263 * on an active context (which by nature is already on the GPU).
1265 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1266 err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1271 err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
1276 * And mark it as a globally pinned object to let the shrinker know
1277 * it cannot reclaim the object until we release it.
1279 vma->obj->pin_global++;
1284 static void __context_unpin(struct intel_context *ce)
1286 struct i915_vma *vma;
1292 vma->obj->pin_global--;
1293 i915_vma_unpin(vma);
1296 static void intel_ring_context_unpin(struct intel_context *ce)
1298 __context_unpin_ppgtt(ce->gem_context);
1299 __context_unpin(ce);
1301 i915_gem_context_put(ce->gem_context);
1304 static struct i915_vma *
1305 alloc_context_vma(struct intel_engine_cs *engine)
1307 struct drm_i915_private *i915 = engine->i915;
1308 struct drm_i915_gem_object *obj;
1309 struct i915_vma *vma;
1312 obj = i915_gem_object_create(i915, engine->context_size);
1314 return ERR_CAST(obj);
1316 if (engine->default_state) {
1317 void *defaults, *vaddr;
1319 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
1320 if (IS_ERR(vaddr)) {
1321 err = PTR_ERR(vaddr);
1325 defaults = i915_gem_object_pin_map(engine->default_state,
1327 if (IS_ERR(defaults)) {
1328 err = PTR_ERR(defaults);
1332 memcpy(vaddr, defaults, engine->context_size);
1334 i915_gem_object_unpin_map(engine->default_state);
1335 i915_gem_object_unpin_map(obj);
1339 * Try to make the context utilize L3 as well as LLC.
1341 * On VLV we don't have L3 controls in the PTEs so we
1342 * shouldn't touch the cache level, especially as that
1343 * would make the object snooped which might have a
1344 * negative performance impact.
1346 * Snooping is required on non-llc platforms in execlist
1347 * mode, but since all GGTT accesses use PAT entry 0 we
1348 * get snooping anyway regardless of cache_level.
1350 * This is only applicable for Ivy Bridge devices since
1351 * later platforms don't have L3 control bits in the PTE.
1353 if (IS_IVYBRIDGE(i915)) {
1354 /* Ignore any error, regard it as a simple optimisation */
1355 i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
1358 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1367 i915_gem_object_unpin_map(obj);
1369 i915_gem_object_put(obj);
1370 return ERR_PTR(err);
1373 static struct intel_context *
1374 __ring_context_pin(struct intel_engine_cs *engine,
1375 struct i915_gem_context *ctx,
1376 struct intel_context *ce)
1380 if (!ce->state && engine->context_size) {
1381 struct i915_vma *vma;
1383 vma = alloc_context_vma(engine);
1392 err = __context_pin(ce);
1396 err = __context_pin_ppgtt(ce->gem_context);
1400 i915_gem_context_get(ctx);
1402 /* One ringbuffer to rule them all */
1403 GEM_BUG_ON(!engine->buffer);
1404 ce->ring = engine->buffer;
1409 __context_unpin(ce);
1412 return ERR_PTR(err);
1415 static const struct intel_context_ops ring_context_ops = {
1416 .unpin = intel_ring_context_unpin,
1417 .destroy = intel_ring_context_destroy,
1420 static struct intel_context *
1421 intel_ring_context_pin(struct intel_engine_cs *engine,
1422 struct i915_gem_context *ctx)
1424 struct intel_context *ce = to_intel_context(ctx, engine);
1426 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1428 if (likely(ce->pin_count++))
1430 GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
1432 ce->ops = &ring_context_ops;
1434 return __ring_context_pin(engine, ctx, ce);
1437 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
1439 struct i915_timeline *timeline;
1440 struct intel_ring *ring;
1444 intel_engine_setup_common(engine);
1446 timeline = i915_timeline_create(engine->i915, engine->name);
1447 if (IS_ERR(timeline)) {
1448 err = PTR_ERR(timeline);
1452 ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
1453 i915_timeline_put(timeline);
1455 err = PTR_ERR(ring);
1459 err = intel_ring_pin(ring);
1463 GEM_BUG_ON(engine->buffer);
1464 engine->buffer = ring;
1467 if (HAS_BROKEN_CS_TLB(engine->i915))
1468 size = I830_WA_SIZE;
1469 err = intel_engine_create_scratch(engine, size);
1473 err = intel_engine_init_common(engine);
1480 intel_engine_cleanup_scratch(engine);
1482 intel_ring_unpin(ring);
1484 intel_ring_free(ring);
1486 intel_engine_cleanup_common(engine);
1490 void intel_engine_cleanup(struct intel_engine_cs *engine)
1492 struct drm_i915_private *dev_priv = engine->i915;
1494 WARN_ON(INTEL_GEN(dev_priv) > 2 &&
1495 (I915_READ_MODE(engine) & MODE_IDLE) == 0);
1497 intel_ring_unpin(engine->buffer);
1498 intel_ring_free(engine->buffer);
1500 if (engine->cleanup)
1501 engine->cleanup(engine);
1503 intel_engine_cleanup_common(engine);
1505 dev_priv->engine[engine->id] = NULL;
1509 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
1511 struct intel_engine_cs *engine;
1512 enum intel_engine_id id;
1514 /* Restart from the beginning of the rings for convenience */
1515 for_each_engine(engine, dev_priv, id)
1516 intel_ring_reset(engine->buffer, 0);
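/*
 * load_pd_dir() emits LRIs from the ring itself so that the ppgtt switch
 * happens in-order with the request: it marks the page-directory
 * cachelines valid via PP_DIR_DCLV (PP_DIR_DCLV_2G presumably covering
 * the whole range) and then points PP_DIR_BASE at the new page directory.
 */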
1519 static int load_pd_dir(struct i915_request *rq,
1520 const struct i915_hw_ppgtt *ppgtt)
1522 const struct intel_engine_cs * const engine = rq->engine;
1525 cs = intel_ring_begin(rq, 6);
1529 *cs++ = MI_LOAD_REGISTER_IMM(1);
1530 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1531 *cs++ = PP_DIR_DCLV_2G;
1533 *cs++ = MI_LOAD_REGISTER_IMM(1);
1534 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1535 *cs++ = ppgtt->pd.base.ggtt_offset << 10;
1537 intel_ring_advance(rq, cs);
1542 static int flush_pd_dir(struct i915_request *rq)
1544 const struct intel_engine_cs * const engine = rq->engine;
1547 cs = intel_ring_begin(rq, 4);
1551 /* Stall until the page table load is complete */
1552 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1553 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1554 *cs++ = i915_ggtt_offset(engine->scratch);
1557 intel_ring_advance(rq, cs);
1561 static inline int mi_set_context(struct i915_request *rq, u32 flags)
1563 struct drm_i915_private *i915 = rq->i915;
1564 struct intel_engine_cs *engine = rq->engine;
1565 enum intel_engine_id id;
1566 const int num_rings =
1567 /* Use an extended w/a on gen7 if signalling from other rings */
1568 (HAS_LEGACY_SEMAPHORES(i915) && IS_GEN7(i915)) ?
1569 INTEL_INFO(i915)->num_rings - 1 :
1571 bool force_restore = false;
1575 flags |= MI_MM_SPACE_GTT;
1576 if (IS_HASWELL(i915))
1577 /* These flags are for resource streamer on HSW+ */
1578 flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
1580 flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
1584 len += 2 + (num_rings ? 4*num_rings + 6 : 0);
1585 if (flags & MI_FORCE_RESTORE) {
1586 GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
1587 flags &= ~MI_FORCE_RESTORE;
1588 force_restore = true;
1592 cs = intel_ring_begin(rq, len);
1596 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
1597 if (IS_GEN7(i915)) {
1598 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1600 struct intel_engine_cs *signaller;
1602 *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
1603 for_each_engine(signaller, i915, id) {
1604 if (signaller == engine)
1607 *cs++ = i915_mmio_reg_offset(
1608 RING_PSMI_CTL(signaller->mmio_base));
1609 *cs++ = _MASKED_BIT_ENABLE(
1610 GEN6_PSMI_SLEEP_MSG_DISABLE);
1615 if (force_restore) {
1617 * The HW doesn't handle being told to restore the current
1618 * context very well. Quite often it likes to go off and
1619 * sulk, especially when it is meant to be reloading PP_DIR.
1620 * A very simple fix to force the reload is to simply switch
1621 * away from the current context and back again.
1623 * Note that the kernel_context will contain random state
1624 * following the INHIBIT_RESTORE. We accept this since we
1625 * never use the kernel_context state; it is merely a
1626 * placeholder we use to flush other contexts.
1628 *cs++ = MI_SET_CONTEXT;
1629 *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
1636 *cs++ = MI_SET_CONTEXT;
1637 *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
1639 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
1640 * WaMiSetContext_Hang:snb,ivb,vlv
1644 if (IS_GEN7(i915)) {
1646 struct intel_engine_cs *signaller;
1647 i915_reg_t last_reg = {}; /* keep gcc quiet */
1649 *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
1650 for_each_engine(signaller, i915, id) {
1651 if (signaller == engine)
1654 last_reg = RING_PSMI_CTL(signaller->mmio_base);
1655 *cs++ = i915_mmio_reg_offset(last_reg);
1656 *cs++ = _MASKED_BIT_DISABLE(
1657 GEN6_PSMI_SLEEP_MSG_DISABLE);
1660 /* Insert a delay before the next switch! */
1661 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1662 *cs++ = i915_mmio_reg_offset(last_reg);
1663 *cs++ = i915_ggtt_offset(engine->scratch);
1666 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1669 intel_ring_advance(rq, cs);
1674 static int remap_l3(struct i915_request *rq, int slice)
1676 u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
1682 cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
1687 * Note: We do not worry about the concurrent register cacheline hang
1688 * here because no other code should access these registers other than
1689 * at initialization time.
1691 *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
1692 for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
1693 *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
1694 *cs++ = remap_info[i];
1697 intel_ring_advance(rq, cs);
1702 static int switch_context(struct i915_request *rq)
1704 struct intel_engine_cs *engine = rq->engine;
1705 struct i915_gem_context *ctx = rq->gem_context;
1706 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
1707 unsigned int unwind_mm = 0;
1711 lockdep_assert_held(&rq->i915->drm.struct_mutex);
1712 GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
1718 * Baytrail takes a little more convincing that it really needs
1719 * to reload the PD between contexts. It is not just a little
1720 * longer, as adding more stalls after the load_pd_dir (i.e.
1721 * adding a long loop around flush_pd_dir) is not as effective
1722 * as reloading the PD umpteen times. 32 is derived from
1723 * experimentation (gem_exec_parallel/fds) and has no good
1727 if (engine->id == BCS && IS_VALLEYVIEW(engine->i915))
1731 ret = load_pd_dir(rq, ppgtt);
1736 if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
1737 unwind_mm = intel_engine_flag(engine);
1738 ppgtt->pd_dirty_rings &= ~unwind_mm;
1739 hw_flags = MI_FORCE_RESTORE;
1743 if (rq->hw_context->state) {
1744 GEM_BUG_ON(engine->id != RCS);
1747 * The kernel context(s) is treated as pure scratch and is not
1748 * expected to retain any state (as we sacrifice it during
1749 * suspend and on resume it may be corrupted). This is ok,
1750 * as nothing actually executes using the kernel context; it
1751 * is purely used for flushing user contexts.
1753 if (i915_gem_context_is_kernel(ctx))
1754 hw_flags = MI_RESTORE_INHIBIT;
1756 ret = mi_set_context(rq, hw_flags);
1762 ret = engine->emit_flush(rq, EMIT_INVALIDATE);
1766 ret = flush_pd_dir(rq);
1771 * Not only do we need a full barrier (post-sync write) after
1772 * invalidating the TLBs, but we need to wait a little bit
1773 * longer. Whether this is merely delaying us, or the
1774 * subsequent flush is a key part of serialising with the
1775 * post-sync op, this extra pass appears vital before a
1778 ret = engine->emit_flush(rq, EMIT_INVALIDATE);
1782 ret = engine->emit_flush(rq, EMIT_FLUSH);
1787 if (ctx->remap_slice) {
1788 for (i = 0; i < MAX_L3_SLICES; i++) {
1789 if (!(ctx->remap_slice & BIT(i)))
1792 ret = remap_l3(rq, i);
1797 ctx->remap_slice = 0;
1804 ppgtt->pd_dirty_rings |= unwind_mm;
1809 static int ring_request_alloc(struct i915_request *request)
1813 GEM_BUG_ON(!request->hw_context->pin_count);
1815 /* Flush enough space to reduce the likelihood of waiting after
1816 * we start building the request - in which case we will just
1817 * have to repeat work.
1819 request->reserved_space += LEGACY_REQUEST_SIZE;
1821 ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
1825 ret = switch_context(request);
1829 request->reserved_space -= LEGACY_REQUEST_SIZE;
1833 static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
1835 struct i915_request *target;
1838 lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
1840 if (intel_ring_update_space(ring) >= bytes)
1843 GEM_BUG_ON(list_empty(&ring->request_list));
1844 list_for_each_entry(target, &ring->request_list, ring_link) {
1845 /* Would completion of this request free enough space? */
1846 if (bytes <= __intel_ring_space(target->postfix,
1847 ring->emit, ring->size))
1851 if (WARN_ON(&target->ring_link == &ring->request_list))
1854 timeout = i915_request_wait(target,
1855 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
1856 MAX_SCHEDULE_TIMEOUT);
1860 i915_request_retire_upto(target);
1862 intel_ring_update_space(ring);
1863 GEM_BUG_ON(ring->space < bytes);
1867 int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
1869 GEM_BUG_ON(bytes > ring->effective_size);
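/*
 * If the request does not fit in the space left before the end of the
 * ring, we will have to pad to the end and wrap, so account for those
 * padding bytes in the amount we wait for.
 */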
1870 if (unlikely(bytes > ring->effective_size - ring->emit))
1871 bytes += ring->size - ring->emit;
1873 if (unlikely(bytes > ring->space)) {
1874 int ret = wait_for_space(ring, bytes);
1879 GEM_BUG_ON(ring->space < bytes);
1883 u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
1885 struct intel_ring *ring = rq->ring;
1886 const unsigned int remain_usable = ring->effective_size - ring->emit;
1887 const unsigned int bytes = num_dwords * sizeof(u32);
1888 unsigned int need_wrap = 0;
1889 unsigned int total_bytes;
1892 /* Packets must be qword aligned. */
1893 GEM_BUG_ON(num_dwords & 1);
1895 total_bytes = bytes + rq->reserved_space;
1896 GEM_BUG_ON(total_bytes > ring->effective_size);
1898 if (unlikely(total_bytes > remain_usable)) {
1899 const int remain_actual = ring->size - ring->emit;
1901 if (bytes > remain_usable) {
1903 * Not enough space for the basic request. So need to
1904 * flush out the remainder and then wait for
1907 total_bytes += remain_actual;
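/*
 * The low bit of need_wrap only flags that a wrap is required
 * (presumably keeping it non-zero even if remain_actual were 0); it is
 * masked off again before the tail is padded with NOPs.
 */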
1908 need_wrap = remain_actual | 1;
1911 * The base request will fit but the reserved space
1912 * falls off the end. So we don't need an immediate
1913 * wrap and only need to effectively wait for the
1914 * reserved size from the start of ringbuffer.
1916 total_bytes = rq->reserved_space + remain_actual;
1920 if (unlikely(total_bytes > ring->space)) {
1924 * Space is reserved in the ringbuffer for finalising the
1925 * request, as that cannot be allowed to fail. During request
1926 * finalisation, reserved_space is set to 0 to stop the
1927 * overallocation and the assumption is that then we never need
1928 * to wait (which has the risk of failing with EINTR).
1930 * See also i915_request_alloc() and i915_request_add().
1932 GEM_BUG_ON(!rq->reserved_space);
1934 ret = wait_for_space(ring, total_bytes);
1936 return ERR_PTR(ret);
1939 if (unlikely(need_wrap)) {
1941 GEM_BUG_ON(need_wrap > ring->space);
1942 GEM_BUG_ON(ring->emit + need_wrap > ring->size);
1943 GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
1945 /* Fill the tail with MI_NOOP */
1946 memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
1947 ring->space -= need_wrap;
1951 GEM_BUG_ON(ring->emit > ring->size - bytes);
1952 GEM_BUG_ON(ring->space < bytes);
1953 cs = ring->vaddr + ring->emit;
1954 GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
1955 ring->emit += bytes;
1956 ring->space -= bytes;
1961 /* Align the ring tail to a cacheline boundary */
1962 int intel_ring_cacheline_align(struct i915_request *rq)
1967 num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
1968 if (num_dwords == 0)
1971 num_dwords = CACHELINE_DWORDS - num_dwords;
1972 GEM_BUG_ON(num_dwords & 1);
1974 cs = intel_ring_begin(rq, num_dwords);
1978 memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
1979 intel_ring_advance(rq, cs);
1981 GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
1985 static void gen6_bsd_submit_request(struct i915_request *request)
1987 struct drm_i915_private *dev_priv = request->i915;
1989 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1991 /* Every tail move must follow the sequence below */
1993 /* Disable notification that the ring is IDLE. The GT
1994 * will then assume that it is busy and bring it out of rc6.
1996 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
1997 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1999 /* Clear the context id. Here be magic! */
2000 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
2002 /* Wait for the ring not to be idle, i.e. for it to wake up. */
2003 if (__intel_wait_for_register_fw(dev_priv,
2004 GEN6_BSD_SLEEP_PSMI_CONTROL,
2005 GEN6_BSD_SLEEP_INDICATOR,
2008 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
2010 /* Now that the ring is fully powered up, update the tail */
2011 i9xx_submit_request(request);
2013 /* Let the ring send IDLE messages to the GT again,
2014 * and so let it sleep to conserve power when idle.
2016 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2017 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2019 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2022 static int mi_flush_dw(struct i915_request *rq, u32 flags)
2026 cs = intel_ring_begin(rq, 4);
2033 * We always require a command barrier so that subsequent
2034 * commands, such as breadcrumb interrupts, are strictly ordered
2035 * wrt the contents of the write cache being flushed to memory
2036 * (and thus being coherent from the CPU).
2038 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2041 * Bspec vol 1c.3 - blitter engine command streamer:
2042 * "If ENABLED, all TLBs will be invalidated once the flush
2043 * operation is complete. This bit is only valid when the
2044 * Post-Sync Operation field is a value of 1h or 3h."
2049 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
2053 intel_ring_advance(rq, cs);
2058 static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
2060 return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
2063 static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
2065 return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
2069 hsw_emit_bb_start(struct i915_request *rq,
2070 u64 offset, u32 len,
2071 unsigned int dispatch_flags)
2075 cs = intel_ring_begin(rq, 2);
2079 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
2080 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
2081 /* bit0-7 is the length on GEN6+ */
2083 intel_ring_advance(rq, cs);
2089 gen6_emit_bb_start(struct i915_request *rq,
2090 u64 offset, u32 len,
2091 unsigned int dispatch_flags)
2095 cs = intel_ring_begin(rq, 2);
2099 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
2100 0 : MI_BATCH_NON_SECURE_I965);
2101 /* bit0-7 is the length on GEN6+ */
2103 intel_ring_advance(rq, cs);
2108 /* Blitter support (SandyBridge+) */
2110 static int gen6_ring_flush(struct i915_request *rq, u32 mode)
2112 return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
2115 static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
2116 struct intel_engine_cs *engine)
2120 if (!HAS_LEGACY_SEMAPHORES(dev_priv))
2123 GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
2124 engine->semaphore.sync_to = gen6_ring_sync_to;
2125 engine->semaphore.signal = gen6_signal;
2128 * The current semaphore scheme is only used on pre-gen8
2129 * platforms, and there is no VCS2 ring on pre-gen8.
2130 * The semaphore between RCS and VCS2 is therefore
2131 * initialized as INVALID.
2133 for (i = 0; i < GEN6_NUM_SEMAPHORES; i++) {
2134 static const struct {
2136 i915_reg_t mbox_reg;
2137 } sem_data[GEN6_NUM_SEMAPHORES][GEN6_NUM_SEMAPHORES] = {
2139 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
2140 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
2141 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
2144 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
2145 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
2146 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
2149 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
2150 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
2151 [VECS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
2154 [RCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
2155 [VCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
2156 [BCS_HW] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
2160 i915_reg_t mbox_reg;
2162 if (i == engine->hw_id) {
2163 wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
2164 mbox_reg = GEN6_NOSYNC;
2166 wait_mbox = sem_data[engine->hw_id][i].wait_mbox;
2167 mbox_reg = sem_data[engine->hw_id][i].mbox_reg;
2170 engine->semaphore.mbox.wait[i] = wait_mbox;
2171 engine->semaphore.mbox.signal[i] = mbox_reg;
2175 static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
2176 struct intel_engine_cs *engine)
2178 if (INTEL_GEN(dev_priv) >= 6) {
2179 engine->irq_enable = gen6_irq_enable;
2180 engine->irq_disable = gen6_irq_disable;
2181 engine->irq_seqno_barrier = gen6_seqno_barrier;
2182 } else if (INTEL_GEN(dev_priv) >= 5) {
2183 engine->irq_enable = gen5_irq_enable;
2184 engine->irq_disable = gen5_irq_disable;
2185 engine->irq_seqno_barrier = gen5_seqno_barrier;
2186 } else if (INTEL_GEN(dev_priv) >= 3) {
2187 engine->irq_enable = i9xx_irq_enable;
2188 engine->irq_disable = i9xx_irq_disable;
2190 engine->irq_enable = i8xx_irq_enable;
2191 engine->irq_disable = i8xx_irq_disable;
2195 static void i9xx_set_default_submission(struct intel_engine_cs *engine)
2197 engine->submit_request = i9xx_submit_request;
2198 engine->cancel_requests = cancel_requests;
2200 engine->park = NULL;
2201 engine->unpark = NULL;
2204 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
2206 i9xx_set_default_submission(engine);
2207 engine->submit_request = gen6_bsd_submit_request;
2210 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
2211 struct intel_engine_cs *engine)
2213 /* gen8+ are only supported with execlists */
2214 GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);
2216 intel_ring_init_irq(dev_priv, engine);
2217 intel_ring_init_semaphores(dev_priv, engine);
2219 engine->init_hw = init_ring_common;
2220 engine->reset.prepare = reset_prepare;
2221 engine->reset.reset = reset_ring;
2222 engine->reset.finish = reset_finish;
2224 engine->context_pin = intel_ring_context_pin;
2225 engine->request_alloc = ring_request_alloc;
2227 engine->emit_breadcrumb = i9xx_emit_breadcrumb;
2228 engine->emit_breadcrumb_sz = i9xx_emit_breadcrumb_sz;
2229 if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
2232 engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
2234 num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
2235 engine->emit_breadcrumb_sz += num_rings * 3;
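/*
 * Each additional ring signalled costs 3 dwords in the breadcrumb: an
 * MI_LOAD_REGISTER_IMM header, the mailbox register offset and the
 * seqno value (see gen6_signal()).
 */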
2237 engine->emit_breadcrumb_sz++;
2240 engine->set_default_submission = i9xx_set_default_submission;
2242 if (INTEL_GEN(dev_priv) >= 6)
2243 engine->emit_bb_start = gen6_emit_bb_start;
2244 else if (INTEL_GEN(dev_priv) >= 4)
2245 engine->emit_bb_start = i965_emit_bb_start;
2246 else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
2247 engine->emit_bb_start = i830_emit_bb_start;
2249 engine->emit_bb_start = i915_emit_bb_start;
2252 int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
2254 struct drm_i915_private *dev_priv = engine->i915;
2257 intel_ring_default_vfuncs(dev_priv, engine);
2259 if (HAS_L3_DPF(dev_priv))
2260 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2262 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2264 if (INTEL_GEN(dev_priv) >= 6) {
2265 engine->init_context = intel_rcs_ctx_init;
2266 engine->emit_flush = gen7_render_ring_flush;
2267 if (IS_GEN6(dev_priv))
2268 engine->emit_flush = gen6_render_ring_flush;
2269 } else if (IS_GEN5(dev_priv)) {
2270 engine->emit_flush = gen4_render_ring_flush;
2272 if (INTEL_GEN(dev_priv) < 4)
2273 engine->emit_flush = gen2_render_ring_flush;
2275 engine->emit_flush = gen4_render_ring_flush;
2276 engine->irq_enable_mask = I915_USER_INTERRUPT;
2279 if (IS_HASWELL(dev_priv))
2280 engine->emit_bb_start = hsw_emit_bb_start;
2282 engine->init_hw = init_render_ring;
2284 ret = intel_init_ring_buffer(engine);
2291 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
2293 struct drm_i915_private *dev_priv = engine->i915;
2295 intel_ring_default_vfuncs(dev_priv, engine);
2297 if (INTEL_GEN(dev_priv) >= 6) {
2298 /* gen6 bsd needs a special wa for tail updates */
2299 if (IS_GEN6(dev_priv))
2300 engine->set_default_submission = gen6_bsd_set_default_submission;
2301 engine->emit_flush = gen6_bsd_ring_flush;
2302 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2304 engine->emit_flush = bsd_ring_flush;
2305 if (IS_GEN5(dev_priv))
2306 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2308 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2311 return intel_init_ring_buffer(engine);
2314 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
2316 struct drm_i915_private *dev_priv = engine->i915;
2318 intel_ring_default_vfuncs(dev_priv, engine);
2320 engine->emit_flush = gen6_ring_flush;
2321 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2323 return intel_init_ring_buffer(engine);
2326 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
2328 struct drm_i915_private *dev_priv = engine->i915;
2330 intel_ring_default_vfuncs(dev_priv, engine);
2332 engine->emit_flush = gen6_ring_flush;
2333 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2334 engine->irq_enable = hsw_vebox_irq_enable;
2335 engine->irq_disable = hsw_vebox_irq_disable;
2337 return intel_init_ring_buffer(engine);