/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */
#include "gen2_engine_cs.h"
#include "gen6_engine_cs.h"
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "shmem_utils.h"
/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200
static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Keep the render interrupt unmasked as this papers over
	 * lost interrupts following a reset.
	 */
	if (engine->class == RENDER_CLASS) {
		if (INTEL_GEN(engine->i915) >= 6)
			mask &= ~(GT_RENDER_USER_INTERRUPT << 24);
		else
			mask &= ~I915_USER_INTERRUPT;
	}

	intel_engine_set_hwsp_writemask(engine, mask);
}
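/*
 * Program the physical address of the hardware status page. On gen4+,
 * bits 35:32 of the address are carried in bits 7:4 of HWS_PGA, which is
 * what the (phys >> 28) & 0xf0 below encodes.
 */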
static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
	u32 addr;

	addr = lower_32_bits(phys);
	if (INTEL_GEN(engine->i915) >= 4)
		addr |= (phys >> 28) & 0xf0;

	intel_uncore_write(engine->uncore, HWS_PGA, addr);
}
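/* Look up the struct page backing the (pinned) status page object. */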
static struct page *status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
	set_hwstam(engine, ~0u);
}
static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
	i915_reg_t hwsp;

	/*
	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN(engine->i915, 7)) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			fallthrough;
		case RCS0:
			hwsp = RENDER_HWS_PGA_GEN7;
			break;
		case BCS0:
			hwsp = BLT_HWS_PGA_GEN7;
			break;
		case VCS0:
			hwsp = BSD_HWS_PGA_GEN7;
			break;
		case VECS0:
			hwsp = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN(engine->i915, 6)) {
		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		hwsp = RING_HWS_PGA(engine->mmio_base);
	}

	intel_uncore_write(engine->uncore, hwsp, offset);
	intel_uncore_posting_read(engine->uncore, hwsp);
}
static void flush_cs_tlb(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (!IS_GEN_RANGE(dev_priv, 6, 7))
		return;

	/* ring should be idle before issuing a sync flush */
	drm_WARN_ON(&dev_priv->drm,
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	ENGINE_WRITE(engine, RING_INSTPM,
		     _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					INSTPM_SYNC_FLUSH));
	if (intel_wait_for_register(engine->uncore,
				    RING_INSTPM(engine->mmio_base),
				    INSTPM_SYNC_FLUSH, 0,
				    1000))
		drm_err(&dev_priv->drm,
			"%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
			engine->name);
}
static void ring_setup_status_page(struct intel_engine_cs *engine)
{
	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
	set_hwstam(engine, ~0u);

	flush_cs_tlb(engine);
}
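/*
 * Ask the CS to stop (STOP_RING in MI_MODE on gen3+), then clear HEAD,
 * TAIL and CTL. Returns true if the ring parked with its head at zero.
 */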
static bool stop_ring(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	if (INTEL_GEN(dev_priv) > 2) {
		ENGINE_WRITE(engine,
			     RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
		if (intel_wait_for_register(engine->uncore,
					    RING_MI_MODE(engine->mmio_base),
					    MODE_IDLE,
					    MODE_IDLE,
					    1000)) {
			drm_err(&dev_priv->drm,
				"%s : timed out trying to stop ring\n",
				engine->name);

			/*
			 * Sometimes we observe that the idle flag is not
			 * set even though the ring is empty. So double
			 * check before giving up.
			 */
			if (ENGINE_READ(engine, RING_HEAD) !=
			    ENGINE_READ(engine, RING_TAIL))
				return false;
		}
	}

	ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));

	ENGINE_WRITE(engine, RING_HEAD, 0);
	ENGINE_WRITE(engine, RING_TAIL, 0);

	/* The ring must be empty before it is disabled */
	ENGINE_WRITE(engine, RING_CTL, 0);

	return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
}
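/*
 * If the address space is the global GTT, substitute its aliasing ppgtt
 * (the vm actually used for legacy ring submission); callers check the
 * result for NULL before trying to load a page directory.
 */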
static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
	if (i915_is_ggtt(vm))
		vm = &i915_vm_to_ggtt(vm)->alias->vm;

	return vm;
}
static void set_pp_dir(struct intel_engine_cs *engine)
{
	struct i915_address_space *vm = vm_alias(engine->gt->vm);

	if (vm) {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

		ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
		ENGINE_WRITE(engine, RING_PP_DIR_BASE,
			     px_base(ppgtt->pd)->ggtt_offset << 10);
	}
}
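/*
 * Bring the ring back to life: park it, (re)program the status page,
 * point RING_START at the ring buffer, restore HEAD/TAIL and finally
 * re-enable the ring via RING_CTL, all under forcewake.
 */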
static int xcs_resume(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_ring *ring = engine->legacy.ring;
	int ret = 0;

	ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
		     ring->head, ring->tail);

	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);

	/* WaClearRingBufHeadRegAtInit:ctg,elk */
	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		drm_dbg(&dev_priv->drm, "%s head not reset to zero "
			"ctl %08x head %08x tail %08x start %08x\n",
			engine->name,
			ENGINE_READ(engine, RING_CTL),
			ENGINE_READ(engine, RING_HEAD),
			ENGINE_READ(engine, RING_TAIL),
			ENGINE_READ(engine, RING_START));

		if (!stop_ring(engine)) {
			drm_err(&dev_priv->drm,
				"failed to set %s head to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				engine->name,
				ENGINE_READ(engine, RING_CTL),
				ENGINE_READ(engine, RING_HEAD),
				ENGINE_READ(engine, RING_TAIL),
				ENGINE_READ(engine, RING_START));
			ret = -EIO;
			goto out;
		}
	}

	if (HWS_NEEDS_PHYSICAL(dev_priv))
		ring_setup_phys_status_page(engine);
	else
		ring_setup_status_page(engine);

	intel_engine_reset_breadcrumbs(engine);

	/* Enforce ordering by reading HEAD register back */
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/*
	 * Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	set_pp_dir(engine);

	/* First wake the ring up to an empty/idle ring */
	ENGINE_WRITE(engine, RING_HEAD, ring->head);
	ENGINE_WRITE(engine, RING_TAIL, ring->head);
	ENGINE_POSTING_READ(engine, RING_TAIL);

	ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (intel_wait_for_register(engine->uncore,
				    RING_CTL(engine->mmio_base),
				    RING_VALID, RING_VALID,
				    50)) {
		drm_err(&dev_priv->drm, "%s initialization failed "
			"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
			engine->name,
			ENGINE_READ(engine, RING_CTL),
			ENGINE_READ(engine, RING_CTL) & RING_VALID,
			ENGINE_READ(engine, RING_HEAD), ring->head,
			ENGINE_READ(engine, RING_TAIL), ring->tail,
			ENGINE_READ(engine, RING_START),
			i915_ggtt_offset(ring->vma));
		ret = -EIO;
		goto out;
	}

	if (INTEL_GEN(dev_priv) > 2)
		ENGINE_WRITE(engine,
			     RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));

	/* Now awake, let it get started */
	if (ring->tail != ring->head) {
		ENGINE_WRITE(engine, RING_TAIL, ring->tail);
		ENGINE_POSTING_READ(engine, RING_TAIL);
	}

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_signal_breadcrumbs(engine);
out:
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	return ret;
}
static void reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;

	/*
	 * We stop engines, otherwise we might get failed reset and a
	 * dead gpu (on elk). Also, gpus as modern as kbl can suffer
	 * from a system hang if a batchbuffer is progressing when
	 * the reset is issued, regardless of READY_TO_RESET ack.
	 * Thus assume it is best to stop engines on all gens
	 * where we have a gpu reset.
	 *
	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
	 *
	 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
	 *
	 * FIXME: Wa for more modern gens needs to be validated
	 */
	ENGINE_TRACE(engine, "\n");

	if (intel_engine_stop_cs(engine))
		ENGINE_TRACE(engine, "timed out on STOP_RING\n");

	intel_uncore_write_fw(uncore,
			      RING_HEAD(base),
			      intel_uncore_read_fw(uncore, RING_TAIL(base)));
	intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */

	intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
	intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
	intel_uncore_posting_read_fw(uncore, RING_TAIL(base));

	/* The ring must be empty before it is disabled */
	intel_uncore_write_fw(uncore, RING_CTL(base), 0);

	/* Check acts as a post */
	if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
		ENGINE_TRACE(engine, "ring head [%x] not parked\n",
			     intel_uncore_read_fw(uncore, RING_HEAD(base)));
}
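/*
 * After a reset, rewind the ring to the first incomplete request (or to
 * the tail if everything had completed) so that replay restarts from there.
 */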
static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct i915_request *pos, *rq;
	unsigned long flags;
	u32 head;

	rq = NULL;
	spin_lock_irqsave(&engine->active.lock, flags);
	list_for_each_entry(pos, &engine->active.requests, sched.link) {
		if (!i915_request_completed(pos)) {
			rq = pos;
			break;
		}
	}

	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away a random
	 * amount of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when they lose the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanisms are
	 * safety valves if client submission ends up resulting in nothing
	 * more than subsequent hangs.
	 */

	if (rq) {
		/*
		 * Try to restore the logical GPU state to match the
		 * continuation of the request queue. If we skip the
		 * context/PD restore, then the next request may try to execute
		 * assuming that its context is valid and loaded on the GPU and
		 * so may try to access invalid memory, prompting repeated GPU
		 * hangs.
		 *
		 * If the request was guilty, we still restore the logical
		 * state in case the next request requires it (e.g. the
		 * aliasing ppgtt), but skip over the hung batch.
		 *
		 * If the request was innocent, we try to replay the request
		 * with the restored context.
		 */
		__i915_request_reset(rq, stalled);

		GEM_BUG_ON(rq->ring != engine->legacy.ring);
		head = rq->head;
	} else {
		head = engine->legacy.ring->tail;
	}
	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);

	spin_unlock_irqrestore(&engine->active.lock, flags);
}
static void reset_finish(struct intel_engine_cs *engine)
{
}
static void reset_cancel(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->active.lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->active.requests, sched.link) {
		i915_request_set_error_once(request, -EIO);
		i915_request_mark_complete(request);
	}

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->active.lock, flags);
}
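/* Legacy submission: kick the ring by writing the new TAIL over mmio. */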
static void i9xx_submit_request(struct i915_request *request)
{
	i915_request_submit(request);
	wmb(); /* paranoid flush writes out of the WCB before mmio */

	ENGINE_WRITE(request->engine, RING_TAIL,
		     intel_ring_set_tail(request->ring, request->tail));
}
static void __ring_context_fini(struct intel_context *ce)
{
	i915_vma_put(ce->state);
}

static void ring_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__ring_context_fini(ce);

	intel_context_fini(ce);
	intel_context_free(ce);
}
static int __context_pin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;
	int err = 0;

	vm = vm_alias(ce->vm);
	if (vm)
		err = gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));

	return err;
}

static void __context_unpin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;

	vm = vm_alias(ce->vm);
	if (vm)
		gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}

static void ring_context_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce);
}
static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_shmem(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

	if (engine->default_state) {
		void *vaddr;

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_obj;
		}

		shmem_read(engine->default_state, 0,
			   vaddr, engine->context_size);

		i915_gem_object_flush_map(obj);
		__i915_gem_object_release_map(obj);
	}

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
static int ring_context_alloc(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->legacy.ring);
	ce->ring = engine->legacy.ring;
	ce->timeline = intel_timeline_get(engine->legacy.timeline);

	GEM_BUG_ON(ce->state);
	if (engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ce->state = vma;
		if (engine->default_state)
			__set_bit(CONTEXT_VALID_BIT, &ce->flags);
	}

	return 0;
}
static int ring_context_pin(struct intel_context *ce)
{
	return __context_pin_ppgtt(ce);
}

static void ring_context_reset(struct intel_context *ce)
{
	intel_ring_reset(ce->ring, ce->ring->emit);
}
static const struct intel_context_ops ring_context_ops = {
	.alloc = ring_context_alloc,

	.pin = ring_context_pin,
	.unpin = ring_context_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = ring_context_reset,
	.destroy = ring_context_destroy,
};
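/*
 * Emit LRI commands to point the ring's PP_DIR_DCLV/PP_DIR_BASE at the
 * given ppgtt, read PP_DIR_BASE back into scratch to (hopefully) stall
 * until the load completes, and then force a TLB invalidation.
 */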
static int load_pd_dir(struct i915_request *rq,
		       const struct i915_ppgtt *ppgtt,
		       u32 valid)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
	*cs++ = valid;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = px_base(ppgtt->pd)->ggtt_offset << 10;

	/* Stall until the page table load is complete? */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = intel_gt_scratch_offset(engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
	*cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);

	intel_ring_advance(rq, cs);

	return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
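/*
 * Emit an MI_SET_CONTEXT switching the render engine to @ce, wrapped in
 * the gen7 MI_ARB_ON_OFF and Haswell PSMI sleep-message workarounds plus
 * the ilk MI_SUSPEND_FLUSH workaround that the switch requires.
 */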
static inline int mi_set_context(struct i915_request *rq,
				 struct intel_context *ce,
				 u32 flags)
{
	struct intel_engine_cs *engine = rq->engine;
	struct drm_i915_private *i915 = engine->i915;
	enum intel_engine_id id;
	const int num_engines =
		IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

	len = 4;
	if (IS_GEN(i915, 7))
		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
	else if (IS_GEN(i915, 5))
		len += 2;
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (IS_GEN(i915, 7)) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_engines) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	} else if (IS_GEN(i915, 5)) {
		/*
		 * This w/a is only listed for pre-production ilk a/b steppings,
		 * but is also mentioned for programming the powerctx. To be
		 * safe, just apply the workaround; we do not use SyncFlush so
		 * this should never take effect and so be a no-op!
		 */
		*cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
	}

	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple fix to force the reload is to simply switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(ce->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (IS_GEN(i915, 7)) {
		if (num_engines) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = intel_gt_scratch_offset(engine->gt,
							INTEL_GT_SCRATCH_FIELD_DEFAULT);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	} else if (IS_GEN(i915, 5)) {
		*cs++ = MI_SUSPEND_FLUSH;
	}

	intel_ring_advance(rq, cs);

	return 0;
}
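/*
 * Replay the L3 remapping registers for one slice from the software copy
 * kept in i915->l3_parity, using a single MI_LOAD_REGISTER_IMM block.
 */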
static int remap_l3_slice(struct i915_request *rq, int slice)
{
	u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
}
static int remap_l3(struct i915_request *rq)
{
	struct i915_gem_context *ctx = i915_request_gem_context(rq);
	int i, err;

	if (!ctx || !ctx->remap_slice)
		return 0;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(ctx->remap_slice & BIT(i)))
			continue;

		err = remap_l3_slice(rq, i);
		if (err)
			return err;
	}

	ctx->remap_slice = 0;
	return 0;
}
static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
{
	int ret;

	if (!vm)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/*
	 * Not only do we need a full barrier (post-sync write) after
	 * invalidating the TLBs, but we need to wait a little bit
	 * longer. Whether this is merely delaying us, or the
	 * subsequent flush is a key part of serialising with the
	 * post-sync op, this extra pass appears vital before a
	 * mm switch!
	 */
	ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm), PP_DIR_DCLV_2G);
	if (ret)
		return ret;

	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}
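/*
 * Run the wa_ctx batch (see gen7_setup_clear_gpr_bb()) under the kernel
 * context to clear residual register state left behind by the previous
 * user context before another one takes over the engine.
 */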
static int clear_residuals(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	int ret;

	ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
	if (ret)
		return ret;

	if (engine->kernel_context->state) {
		ret = mi_set_context(rq,
				     engine->kernel_context,
				     MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
		if (ret)
			return ret;
	}

	ret = engine->emit_bb_start(rq,
				    engine->wa_ctx.vma->node.start, 0,
				    0);
	if (ret)
		return ret;

	ret = engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/* Always invalidate before the next switch_mm() */
	return engine->emit_flush(rq, EMIT_INVALIDATE);
}
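/*
 * Per-request context switch for the legacy ring: clear residuals when
 * moving off the previous user context, switch the ppgtt, emit
 * MI_SET_CONTEXT for engines with logical state and re-apply any pending
 * L3 remapping.
 */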
static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_context *ce = rq->context;
	void **residuals = NULL;
	int ret;

	GEM_BUG_ON(HAS_EXECLISTS(engine->i915));

	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
		if (engine->wa_ctx.vma->private != ce) {
			ret = clear_residuals(rq);
			if (ret)
				return ret;

			residuals = &engine->wa_ctx.vma->private;
		}
	}

	ret = switch_mm(rq, vm_alias(ce->vm));
	if (ret)
		return ret;

	if (ce->state) {
		u32 flags;

		GEM_BUG_ON(engine->id != RCS0);

		/* For resource streamer on HSW+ and power context elsewhere */
		BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
		BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);

		flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
		if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
			flags |= MI_RESTORE_EXT_STATE_EN;
		else
			flags |= MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, ce, flags);
		if (ret)
			return ret;
	}

	ret = remap_l3(rq);
	if (ret)
		return ret;

	/*
	 * Now past the point of no return, this request _will_ be emitted.
	 *
	 * Or at least this preamble will be emitted, the request may be
	 * interrupted prior to submitting the user payload. If so, we
	 * still submit the "empty" request in order to preserve global
	 * state tracking such as this, our tracking of the current
	 * dirty context.
	 */
	if (residuals) {
		intel_context_put(*residuals);
		*residuals = intel_context_get(ce);
	}

	return 0;
}
static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->context));
	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	ret = switch_context(request);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}
static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct intel_uncore *uncore = request->engine->uncore;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_BSD_SLEEP_PSMI_CONTROL,
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		drm_err(&uncore->i915->drm,
			"timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;

	engine->park = NULL;
	engine->unpark = NULL;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	i9xx_set_default_submission(engine);
	engine->submit_request = gen6_bsd_submit_request;
}
static void ring_release(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) > 2 &&
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	intel_engine_cleanup_common(engine);

	if (engine->wa_ctx.vma) {
		intel_context_put(engine->wa_ctx.vma->private);
		i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
	}

	intel_ring_unpin(engine->legacy.ring);
	intel_ring_put(engine->legacy.ring);

	intel_timeline_unpin(engine->legacy.timeline);
	intel_timeline_put(engine->legacy.timeline);
}
static void setup_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (INTEL_GEN(i915) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
	} else if (INTEL_GEN(i915) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
	} else if (INTEL_GEN(i915) >= 3) {
		engine->irq_enable = gen3_irq_enable;
		engine->irq_disable = gen3_irq_disable;
	} else {
		engine->irq_enable = gen2_irq_enable;
		engine->irq_disable = gen2_irq_disable;
	}
}
static void setup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(INTEL_GEN(i915) >= 8);

	setup_irq(engine);

	engine->resume = xcs_resume;
	engine->reset.prepare = reset_prepare;
	engine->reset.rewind = reset_rewind;
	engine->reset.cancel = reset_cancel;
	engine->reset.finish = reset_finish;

	engine->cops = &ring_context_ops;
	engine->request_alloc = ring_request_alloc;

	/*
	 * Using a global execution timeline; the previous final breadcrumb is
	 * equivalent to our next initial breadcrumb, so we can elide
	 * engine->emit_init_breadcrumb().
	 */
	engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
	if (IS_GEN(i915, 5))
		engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;

	engine->set_default_submission = i9xx_set_default_submission;

	if (INTEL_GEN(i915) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (INTEL_GEN(i915) >= 4)
		engine->emit_bb_start = gen4_emit_bb_start;
	else if (IS_I830(i915) || IS_I845G(i915))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = gen3_emit_bb_start;
}
static void setup_rcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_L3_DPF(i915))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (INTEL_GEN(i915) >= 7) {
		engine->emit_flush = gen7_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
	} else if (IS_GEN(i915, 6)) {
		engine->emit_flush = gen6_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
	} else if (IS_GEN(i915, 5)) {
		engine->emit_flush = gen4_emit_flush_rcs;
	} else {
		if (INTEL_GEN(i915) < 4)
			engine->emit_flush = gen2_emit_flush;
		else
			engine->emit_flush = gen4_emit_flush_rcs;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(i915))
		engine->emit_bb_start = hsw_emit_bb_start;
}
static void setup_vcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (INTEL_GEN(i915) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN(i915, 6))
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_emit_flush_vcs;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (IS_GEN(i915, 6))
			engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
		else
			engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
	} else {
		engine->emit_flush = gen4_emit_flush_vcs;
		if (IS_GEN(i915, 5))
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}
}
static void setup_bcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (IS_GEN(i915, 6))
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
	else
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}
static void setup_vecs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(INTEL_GEN(i915) < 7);

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_irq_enable_vecs;
	engine->irq_disable = hsw_irq_disable_vecs;

	engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}
static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
				    struct i915_vma * const vma)
{
	return gen7_setup_clear_gpr_bb(engine, vma);
}
static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int size;
	int err;

	size = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
	if (size <= 0)
		return size;

	size = ALIGN(size, PAGE_SIZE);
	obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	vma->private = intel_context_create(engine); /* dummy residuals */
	if (IS_ERR(vma->private)) {
		err = PTR_ERR(vma->private);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		goto err_private;

	err = i915_vma_sync(vma);
	if (err)
		goto err_unpin;

	err = gen7_ctx_switch_bb_setup(engine, vma);
	if (err)
		goto err_unpin;

	engine->wa_ctx.vma = vma;
	return 0;

err_unpin:
	i915_vma_unpin(vma);
err_private:
	intel_context_put(vma->private);
err_obj:
	i915_gem_object_put(obj);
	return err;
}
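/*
 * One-time construction of the legacy ring submission backend for an
 * engine: common vfuncs, per-class overrides, a global timeline, a single
 * shared ringbuffer and, on Haswell render, the clear-residuals batch.
 */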
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
	struct intel_timeline *timeline;
	struct intel_ring *ring;
	int err;

	setup_common(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		setup_rcs(engine);
		break;
	case VIDEO_DECODE_CLASS:
		setup_vcs(engine);
		break;
	case COPY_ENGINE_CLASS:
		setup_bcs(engine);
		break;
	case VIDEO_ENHANCEMENT_CLASS:
		setup_vecs(engine);
		break;
	default:
		MISSING_CASE(engine->class);
		return -ENODEV;
	}

	timeline = intel_timeline_create(engine->gt, engine->status_page.vma);
	if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}
	GEM_BUG_ON(timeline->has_initial_breadcrumb);

	err = intel_timeline_pin(timeline);
	if (err)
		goto err_timeline;

	ring = intel_engine_create_ring(engine, SZ_16K);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err_timeline_unpin;
	}

	err = intel_ring_pin(ring);
	if (err)
		goto err_ring;

	GEM_BUG_ON(engine->legacy.ring);
	engine->legacy.ring = ring;
	engine->legacy.timeline = timeline;

	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);

	if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) {
		err = gen7_ctx_switch_bb_init(engine);
		if (err)
			goto err_ring_unpin;
	}

	/* Finally, take ownership and responsibility for cleanup! */
	engine->release = ring_release;

	return 0;

err_ring_unpin:
	intel_ring_unpin(ring);
err_ring:
	intel_ring_put(ring);
err_timeline_unpin:
	intel_timeline_unpin(timeline);
err_timeline:
	intel_timeline_put(timeline);
err:
	intel_engine_cleanup_common(engine);
	return err;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring_submission.c"
#endif