1 // SPDX-License-Identifier: MIT
/*
 * Copyright © 2008-2021 Intel Corporation
 */
6 #include "gen2_engine_cs.h"
7 #include "gen6_engine_cs.h"
8 #include "gen6_ppgtt.h"
9 #include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_mitigations.h"
12 #include "intel_breadcrumbs.h"
13 #include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
16 #include "intel_reset.h"
17 #include "intel_ring.h"
18 #include "shmem_utils.h"
19 #include "intel_engine_heartbeat.h"
21 /* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
24 #define LEGACY_REQUEST_SIZE 200
static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Keep the render interrupt unmasked as this papers over
	 * lost interrupts following a reset.
	 */
	if (engine->class == RENDER_CLASS) {
		if (GRAPHICS_VER(engine->i915) >= 6)
			mask &= ~BIT(0);
		else
			mask &= ~I915_USER_INTERRUPT;
	}

	intel_engine_set_hwsp_writemask(engine, mask);
}
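
/*
 * Point the hardware at a physical-address status page (pre-GGTT HWSP
 * platforms). HWS_PGA takes the low 32 bits of the address; on gen4+ the
 * high address bits 35:32 are packed into register bits 7:4, which is what
 * the (phys >> 28) & 0xf0 term below encodes.
 */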
static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
	u32 addr;

	addr = lower_32_bits(phys);
	if (GRAPHICS_VER(engine->i915) >= 4)
		addr |= (phys >> 28) & 0xf0;

	intel_uncore_write(engine->uncore, HWS_PGA, addr);
}
static struct page *status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
	set_hwstam(engine, ~0u);
}
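
/*
 * Tell the engine where its GGTT-based status page lives. The register
 * offset moved around across generations, hence the per-gen selection below.
 */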
static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
	i915_reg_t hwsp;

	/*
	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (GRAPHICS_VER(engine->i915) == 7) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			fallthrough;
		case RCS0:
			hwsp = RENDER_HWS_PGA_GEN7;
			break;
		case BCS0:
			hwsp = BLT_HWS_PGA_GEN7;
			break;
		case VCS0:
			hwsp = BSD_HWS_PGA_GEN7;
			break;
		case VECS0:
			hwsp = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (GRAPHICS_VER(engine->i915) == 6) {
		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		hwsp = RING_HWS_PGA(engine->mmio_base);
	}

	intel_uncore_write_fw(engine->uncore, hwsp, offset);
	intel_uncore_posting_read_fw(engine->uncore, hwsp);
}
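
/*
 * On gen6/7 the command streamer caches its own TLB entries; request a TLB
 * invalidation plus sync flush through INSTPM and wait for the flush bit to
 * clear so stale translations are not reused after (re)programming the ring.
 */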
static void flush_cs_tlb(struct intel_engine_cs *engine)
{
	if (!IS_GRAPHICS_VER(engine->i915, 6, 7))
		return;

	/* ring should be idle before issuing a sync flush */
	GEM_DEBUG_WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	ENGINE_WRITE_FW(engine, RING_INSTPM,
			_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					   INSTPM_SYNC_FLUSH));
	if (__intel_wait_for_register_fw(engine->uncore,
					 RING_INSTPM(engine->mmio_base),
					 INSTPM_SYNC_FLUSH, 0,
					 1000, 0, NULL))
		ENGINE_TRACE(engine,
			     "wait for SyncFlush to complete for TLB invalidation timed out\n");
}
static void ring_setup_status_page(struct intel_engine_cs *engine)
{
	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
	set_hwstam(engine, ~0u);

	flush_cs_tlb(engine);
}
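
/*
 * Legacy ring submission runs GGTT-based contexts in the aliasing ppgtt, so
 * translate a GGTT address space to its alias before programming or pinning
 * page directories.
 */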
static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
	if (i915_is_ggtt(vm))
		vm = &i915_vm_to_ggtt(vm)->alias->vm;

	return vm;
}
static u32 pp_dir(struct i915_address_space *vm)
{
	return to_gen6_ppgtt(i915_vm_to_ppgtt(vm))->pp_dir;
}
static void set_pp_dir(struct intel_engine_cs *engine)
{
	struct i915_address_space *vm = vm_alias(engine->gt->vm);

	if (!vm)
		return;

	ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
	ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));

	if (GRAPHICS_VER(engine->i915) >= 7) {
		ENGINE_WRITE_FW(engine,
				RING_MODE_GEN7,
				_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}
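
/*
 * Quiesce the ring: make HEAD catch up with TAIL, clear RING_CTL to disable
 * the ring, then zero HEAD/TAIL. Returns true if the hardware reports a
 * clean (zero) HEAD afterwards.
 */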
static bool stop_ring(struct intel_engine_cs *engine)
{
	/* Empty the ring by skipping to the end */
	ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/* The ring must be empty before it is disabled */
	ENGINE_WRITE_FW(engine, RING_CTL, 0);
	ENGINE_POSTING_READ(engine, RING_CTL);

	/* Then reset the disabled ring */
	ENGINE_WRITE_FW(engine, RING_HEAD, 0);
	ENGINE_WRITE_FW(engine, RING_TAIL, 0);

	return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
}
static int xcs_resume(struct intel_engine_cs *engine)
{
	struct intel_ring *ring = engine->legacy.ring;

	ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
		     ring->head, ring->tail);

	/*
	 * Double check the ring is empty & disabled before we resume. Called
	 * from atomic context during PCI probe, so _hardirq().
	 */
	intel_synchronize_hardirq(engine->i915);
	if (!stop_ring(engine))
		goto err;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		ring_setup_phys_status_page(engine);
	else
		ring_setup_status_page(engine);
201 intel_breadcrumbs_reset(engine->breadcrumbs);
203 /* Enforce ordering by reading HEAD register back */
204 ENGINE_POSTING_READ(engine, RING_HEAD);
	/*
	 * Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
212 ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));
214 /* Check that the ring offsets point within the ring! */
215 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
216 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	set_pp_dir(engine);

	/* First wake the ring up to an empty/idle ring */
222 ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
223 ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
224 ENGINE_POSTING_READ(engine, RING_TAIL);
226 ENGINE_WRITE_FW(engine, RING_CTL,
227 RING_CTL_SIZE(ring->size) | RING_VALID);
229 /* If the head is still not zero, the ring is dead */
230 if (__intel_wait_for_register_fw(engine->uncore,
231 RING_CTL(engine->mmio_base),
					 RING_VALID, RING_VALID,
					 5000, 0, NULL))
		goto err;
236 if (GRAPHICS_VER(engine->i915) > 2)
237 ENGINE_WRITE_FW(engine,
238 RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
240 /* Now awake, let it get started */
241 if (ring->tail != ring->head) {
242 ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail);
		ENGINE_POSTING_READ(engine, RING_TAIL);
	}
246 /* Papering over lost _interrupts_ immediately following the restart */
247 intel_engine_signal_breadcrumbs(engine);
	return 0;

err:
	drm_err(&engine->i915->drm,
		"%s initialization failed; "
		"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
		engine->name,
		ENGINE_READ(engine, RING_CTL),
		ENGINE_READ(engine, RING_CTL) & RING_VALID,
		ENGINE_READ(engine, RING_HEAD), ring->head,
		ENGINE_READ(engine, RING_TAIL), ring->tail,
		ENGINE_READ(engine, RING_START),
		i915_ggtt_offset(ring->vma));
	return -EIO;
}
static void sanitize_hwsp(struct intel_engine_cs *engine)
{
	struct intel_timeline *tl;

	list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
		intel_timeline_reset_seqno(tl);
}
static void xcs_sanitize(struct intel_engine_cs *engine)
{
	/*
	 * Poison residual state on resume, in case the suspend didn't!
	 *
	 * We have to assume that across suspend/resume (or other loss
	 * of control) that the contents of our pinned buffers has been
	 * lost, replaced by garbage. Since this doesn't always happen,
	 * let's poison such state so that we more quickly spot when
	 * we falsely assume it has been preserved.
	 */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);

	/*
	 * The kernel_context HWSP is stored in the status_page. As above,
	 * that may be lost on resume/initialisation, and so we need to
	 * reset the value in the HWSP.
	 */
	sanitize_hwsp(engine);

	/* And scrub the dirty cachelines for the HWSP */
	clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
}
static void reset_prepare(struct intel_engine_cs *engine)
{
	/*
	 * We stop engines, otherwise we might get failed reset and a
	 * dead gpu (on elk). Also, a gpu as modern as kbl can suffer
	 * from a system hang if the batchbuffer is progressing when
	 * the reset is issued, regardless of the READY_TO_RESET ack.
	 * Thus assume it is best to stop engines on all gens
	 * where we have a gpu reset.
	 *
	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
	 *
	 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
	 * WaClearRingBufHeadRegAtInit:ctg,elk
	 *
	 * FIXME: Wa for more modern gens needs to be validated
	 */
	ENGINE_TRACE(engine, "\n");
	intel_engine_stop_cs(engine);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		ENGINE_TRACE(engine,
			     "HEAD not reset to zero, "
321 "{ CTL:%08x, HEAD:%08x, TAIL:%08x, START:%08x }\n",
322 ENGINE_READ_FW(engine, RING_CTL),
323 ENGINE_READ_FW(engine, RING_HEAD),
324 ENGINE_READ_FW(engine, RING_TAIL),
325 ENGINE_READ_FW(engine, RING_START));
326 if (!stop_ring(engine)) {
327 drm_err(&engine->i915->drm,
328 "failed to set %s head to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				engine->name,
				ENGINE_READ_FW(engine, RING_CTL),
				ENGINE_READ_FW(engine, RING_HEAD),
				ENGINE_READ_FW(engine, RING_TAIL),
				ENGINE_READ_FW(engine, RING_START));
		}
	}
}
static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct i915_request *pos, *rq;
	unsigned long flags;
	u32 head;

	rq = NULL;
	spin_lock_irqsave(&engine->sched_engine->lock, flags);
	rcu_read_lock();
	list_for_each_entry(pos, &engine->sched_engine->requests, sched.link) {
		if (!__i915_request_is_complete(pos)) {
			rq = pos;
			break;
		}
	}
	rcu_read_unlock();
	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away random
	 * amount of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when it loses the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanism are safety
	 * valves if client submission ends up resulting in nothing more than
	 * subsequent hangs.
	 */
	if (rq) {
		/*
		 * Try to restore the logical GPU state to match the
		 * continuation of the request queue. If we skip the
		 * context/PD restore, then the next request may try to execute
		 * assuming that its context is valid and loaded on the GPU and
		 * so may try to access invalid memory, prompting repeated GPU
		 * hangs.
		 *
		 * If the request was guilty, we still restore the logical
		 * state in case the next request requires it (e.g. the
		 * aliasing ppgtt), but skip over the hung batch.
		 *
		 * If the request was innocent, we try to replay the request
		 * with the restored context.
		 */
		__i915_request_reset(rq, stalled);

		GEM_BUG_ON(rq->ring != engine->legacy.ring);
		head = rq->head;
	} else {
		head = engine->legacy.ring->tail;
	}
	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}
static void reset_finish(struct intel_engine_cs *engine)
{
}
static void reset_cancel(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->sched_engine->lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->sched_engine->requests, sched.link)
		i915_request_put(i915_request_mark_eio(request));
	intel_engine_signal_breadcrumbs(engine);

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}
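
/*
 * Legacy submission is simply a posted write of the new TAIL; the commands
 * themselves were already written into the ring by the emit_* callbacks.
 */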
static void i9xx_submit_request(struct i915_request *request)
{
	i915_request_submit(request);
	wmb(); /* paranoid flush writes out of the WCB before mmio */

	ENGINE_WRITE(request->engine, RING_TAIL,
		     intel_ring_set_tail(request->ring, request->tail));
}
static void __ring_context_fini(struct intel_context *ce)
{
	i915_vma_put(ce->state);
}
static void ring_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__ring_context_fini(ce);

	intel_context_fini(ce);
	intel_context_free(ce);
}
static int ring_context_init_default_state(struct intel_context *ce,
					   struct i915_gem_ww_ctx *ww)
{
	struct drm_i915_gem_object *obj = ce->state->obj;
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	shmem_read(ce->engine->default_state, 0,
		   vaddr, ce->engine->context_size);

	i915_gem_object_flush_map(obj);
	__i915_gem_object_release_map(obj);

	__set_bit(CONTEXT_VALID_BIT, &ce->flags);
	return 0;
}
static int ring_context_pre_pin(struct intel_context *ce,
				struct i915_gem_ww_ctx *ww,
				void **unused)
{
	struct i915_address_space *vm;
	int err = 0;

	if (ce->engine->default_state &&
	    !test_bit(CONTEXT_VALID_BIT, &ce->flags)) {
		err = ring_context_init_default_state(ce, ww);
		if (err)
			return err;
	}

	vm = vm_alias(ce->vm);
	if (vm)
		err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)), ww);

	return err;
}
static void __context_unpin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;

	vm = vm_alias(ce->vm);
	if (vm)
		gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}
static void ring_context_unpin(struct intel_context *ce)
{
}
static void ring_context_post_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce);
}
static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_shmem(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
static int ring_context_alloc(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->legacy.ring);
	ce->ring = engine->legacy.ring;
	ce->timeline = intel_timeline_get(engine->legacy.timeline);

	GEM_BUG_ON(ce->state);
	if (engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ce->state = vma;
	}

	return 0;
}
static int ring_context_pin(struct intel_context *ce, void *unused)
{
	return 0;
}
static void ring_context_reset(struct intel_context *ce)
{
	intel_ring_reset(ce->ring, ce->ring->emit);
	clear_bit(CONTEXT_VALID_BIT, &ce->flags);
}
static void ring_context_ban(struct intel_context *ce,
			     struct i915_request *rq)
{
	struct intel_engine_cs *engine;

	if (!rq || !i915_request_is_active(rq))
		return;

	engine = rq->engine;
	lockdep_assert_held(&engine->sched_engine->lock);
	list_for_each_entry_continue(rq, &engine->sched_engine->requests,
				     sched.link)
		if (rq->context == ce) {
			i915_request_set_error_once(rq, -EIO);
			__i915_request_skip(rq);
		}
}
static void ring_context_cancel_request(struct intel_context *ce,
					struct i915_request *rq)
{
	struct intel_engine_cs *engine = NULL;

	i915_request_active_engine(rq, &engine);

	if (engine && intel_engine_pulse(engine))
		intel_gt_handle_error(engine->gt, engine->mask, 0,
				      "request cancellation by %s",
				      current->comm);
}
621 static const struct intel_context_ops ring_context_ops = {
622 .alloc = ring_context_alloc,
624 .cancel_request = ring_context_cancel_request,
626 .ban = ring_context_ban,
628 .pre_pin = ring_context_pre_pin,
629 .pin = ring_context_pin,
630 .unpin = ring_context_unpin,
631 .post_unpin = ring_context_post_unpin,
633 .enter = intel_context_enter_engine,
634 .exit = intel_context_exit_engine,
636 .reset = ring_context_reset,
	.destroy = ring_context_destroy,
};
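
/*
 * Emit the commands that point the engine at a new set of page directories:
 * load PP_DIR_DCLV and PP_DIR_BASE via MI_LOAD_REGISTER_IMM, read
 * PP_DIR_BASE back to a scratch slot to stall until the load has taken
 * effect, and then ask for a TLB invalidation through INSTPM.
 */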
static int load_pd_dir(struct i915_request *rq,
		       struct i915_address_space *vm,
		       u32 valid)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
	*cs++ = valid;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = pp_dir(vm);

	/* Stall until the page table load is complete? */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = intel_gt_scratch_offset(engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
	*cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);

	intel_ring_advance(rq, cs);

	return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
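
/*
 * Emit an MI_SET_CONTEXT switch to @ce, wrapped in the per-generation
 * workarounds: arbitration off/on and PSMI sleep-message tweaks on gen7,
 * MI_SUSPEND_FLUSH on ilk, and a dummy switch to the kernel context when a
 * forced restore has been requested.
 */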
static int mi_set_context(struct i915_request *rq,
			  struct intel_context *ce,
			  u32 flags)
{
	struct intel_engine_cs *engine = rq->engine;
	struct drm_i915_private *i915 = engine->i915;
	enum intel_engine_id id;
	const int num_engines =
		IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

	len = 4;
	if (GRAPHICS_VER(i915) == 7)
		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
	else if (GRAPHICS_VER(i915) == 5)
		len += 2;
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (GRAPHICS_VER(i915) == 7) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_engines) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	} else if (GRAPHICS_VER(i915) == 5) {
		/*
		 * This w/a is only listed for pre-production ilk a/b steppings,
		 * but is also mentioned for programming the powerctx. To be
		 * safe, just apply the workaround; we do not use SyncFlush so
		 * this should never take effect and so be a no-op!
		 */
		*cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
	}
	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple fix to force the reload is to simply switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(ce->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;
	if (GRAPHICS_VER(i915) == 7) {
		if (num_engines) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = intel_gt_scratch_offset(engine->gt,
							INTEL_GT_SCRATCH_FIELD_DEFAULT);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	} else if (GRAPHICS_VER(i915) == 5) {
		*cs++ = MI_SUSPEND_FLUSH;
	}

	intel_ring_advance(rq, cs);

	return 0;
}
static int remap_l3_slice(struct i915_request *rq, int slice)
{
#define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
	u32 *cs, *remap_info = rq->engine->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, L3LOG_DW * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(L3LOG_DW);
	for (i = 0; i < L3LOG_DW; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
#undef L3LOG_DW
}
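
/* Re-emit the L3 remapping for every slice this context has marked dirty. */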
static int remap_l3(struct i915_request *rq)
{
	struct i915_gem_context *ctx = i915_request_gem_context(rq);
	int i, err;

	if (!ctx || !ctx->remap_slice)
		return 0;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(ctx->remap_slice & BIT(i)))
			continue;

		err = remap_l3_slice(rq, i);
		if (err)
			return err;
	}

	ctx->remap_slice = 0;
	return 0;
}
static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
{
	int ret;

	if (!vm)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/*
	 * Not only do we need a full barrier (post-sync write) after
	 * invalidating the TLBs, but we need to wait a little bit
	 * longer. Whether this is merely delaying us, or the
	 * subsequent flush is a key part of serialising with the
	 * post-sync op, this extra pass appears vital before a
	 * switch!
	 */
	ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G);
	if (ret)
		return ret;

	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}
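
/*
 * Scrub state left behind by the previous user: switch to the kernel
 * address space and kernel context, run the gen7 clear-residuals batch,
 * then flush and invalidate before handing the engine to the next context.
 */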
static int clear_residuals(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	int ret;

	ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
	if (ret)
		return ret;

	if (engine->kernel_context->state) {
		ret = mi_set_context(rq,
				     engine->kernel_context,
				     MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
		if (ret)
			return ret;
	}

	ret = engine->emit_bb_start(rq,
				    engine->wa_ctx.vma->node.start, 0,
				    0);
	if (ret)
		return ret;

	ret = engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/* Always invalidate before the next switch_mm() */
	return engine->emit_flush(rq, EMIT_INVALIDATE);
}
static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_context *ce = rq->context;
	void **residuals = NULL;
	int ret;

	GEM_BUG_ON(HAS_EXECLISTS(engine->i915));

	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
		if (engine->wa_ctx.vma->private != ce &&
		    i915_mitigate_clear_residuals()) {
			ret = clear_residuals(rq);
			if (ret)
				return ret;

			residuals = &engine->wa_ctx.vma->private;
		}
	}

	ret = switch_mm(rq, vm_alias(ce->vm));
	if (ret)
		return ret;
	if (ce->state) {
		u32 flags;

		GEM_BUG_ON(engine->id != RCS0);

		/* For resource streamer on HSW+ and power context elsewhere */
		BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
		BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);

		flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
		if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
			flags |= MI_RESTORE_EXT_STATE_EN;
		else
			flags |= MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, ce, flags);
		if (ret)
			return ret;
	}
	ret = remap_l3(rq);
	if (ret)
		return ret;

	/*
	 * Now past the point of no return, this request _will_ be emitted.
	 *
	 * Or at least this preamble will be emitted, the request may be
	 * interrupted prior to submitting the user payload. If so, we
	 * still submit the "empty" request in order to preserve global
	 * state tracking such as this, our tracking of the current
	 * dirty context.
	 */
	if (residuals) {
		intel_context_put(*residuals);
		*residuals = intel_context_get(ce);
	}

	return 0;
}
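
/*
 * Called for every new request on this engine: reserve ring space for the
 * breadcrumb postamble, invalidate caches, and emit the mm/context switch
 * before the caller adds its own payload.
 */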
static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->context));
	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	ret = switch_context(request);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}
static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct intel_uncore *uncore = request->engine->uncore;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
			      _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
1005 /* Clear the context id. Here be magic! */
1006 intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
1008 /* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(uncore,
					 GEN6_BSD_SLEEP_PSMI_CONTROL,
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		drm_err(&uncore->i915->drm,
			"timed out waiting for the BSD ring to wake up\n");
1017 /* Now that the ring is fully powered up, update the tail */
1018 i9xx_submit_request(request);
	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
1023 intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
1024 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
}
static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = gen6_bsd_submit_request;
}
static void ring_release(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	drm_WARN_ON(&dev_priv->drm, GRAPHICS_VER(dev_priv) > 2 &&
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	intel_engine_cleanup_common(engine);

	if (engine->wa_ctx.vma) {
		intel_context_put(engine->wa_ctx.vma->private);
		i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
	}

	intel_ring_unpin(engine->legacy.ring);
	intel_ring_put(engine->legacy.ring);

	intel_timeline_unpin(engine->legacy.timeline);
	intel_timeline_put(engine->legacy.timeline);
}
static void irq_handler(struct intel_engine_cs *engine, u16 iir)
{
	intel_engine_signal_breadcrumbs(engine);
}
static void setup_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	intel_engine_set_irq_handler(engine, irq_handler);

	if (GRAPHICS_VER(i915) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
	} else if (GRAPHICS_VER(i915) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
	} else if (GRAPHICS_VER(i915) >= 3) {
		engine->irq_enable = gen3_irq_enable;
		engine->irq_disable = gen3_irq_disable;
	} else {
		engine->irq_enable = gen2_irq_enable;
		engine->irq_disable = gen2_irq_disable;
	}
}
static void add_to_engine(struct i915_request *rq)
{
	lockdep_assert_held(&rq->engine->sched_engine->lock);
	list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
}
static void remove_from_engine(struct i915_request *rq)
{
	spin_lock_irq(&rq->engine->sched_engine->lock);
	list_del_init(&rq->sched.link);

	/* Prevent further __await_execution() registering a cb, then flush */
	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);

	spin_unlock_irq(&rq->engine->sched_engine->lock);

	i915_request_notify_execute_cb_imm(rq);
}
static void setup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(GRAPHICS_VER(i915) >= 8);

	setup_irq(engine);

	engine->resume = xcs_resume;
1115 engine->sanitize = xcs_sanitize;
1117 engine->reset.prepare = reset_prepare;
1118 engine->reset.rewind = reset_rewind;
1119 engine->reset.cancel = reset_cancel;
1120 engine->reset.finish = reset_finish;
1122 engine->add_active_request = add_to_engine;
1123 engine->remove_active_request = remove_from_engine;
1125 engine->cops = &ring_context_ops;
1126 engine->request_alloc = ring_request_alloc;
	/*
	 * Using a global execution timeline; the previous final breadcrumb is
	 * equivalent to our next initial breadcrumb so we can elide
	 * engine->emit_init_breadcrumb().
	 */
1133 engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
1134 if (GRAPHICS_VER(i915) == 5)
1135 engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;
1137 engine->set_default_submission = i9xx_set_default_submission;
1139 if (GRAPHICS_VER(i915) >= 6)
1140 engine->emit_bb_start = gen6_emit_bb_start;
1141 else if (GRAPHICS_VER(i915) >= 4)
1142 engine->emit_bb_start = gen4_emit_bb_start;
1143 else if (IS_I830(i915) || IS_I845G(i915))
	else if (IS_I830(i915) || IS_I845G(i915))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = gen3_emit_bb_start;
}
static void setup_rcs(struct intel_engine_cs *engine)
{
1151 struct drm_i915_private *i915 = engine->i915;
1153 if (HAS_L3_DPF(i915))
1154 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
1156 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
1158 if (GRAPHICS_VER(i915) >= 7) {
1159 engine->emit_flush = gen7_emit_flush_rcs;
1160 engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
1161 } else if (GRAPHICS_VER(i915) == 6) {
1162 engine->emit_flush = gen6_emit_flush_rcs;
1163 engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
	} else if (GRAPHICS_VER(i915) == 5) {
		engine->emit_flush = gen4_emit_flush_rcs;
	} else {
		if (GRAPHICS_VER(i915) < 4)
			engine->emit_flush = gen2_emit_flush;
		else
			engine->emit_flush = gen4_emit_flush_rcs;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}
1174 if (IS_HASWELL(i915))
		engine->emit_bb_start = hsw_emit_bb_start;
}
static void setup_vcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (GRAPHICS_VER(i915) == 6)
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_emit_flush_vcs;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (GRAPHICS_VER(i915) == 6)
			engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
		else
			engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
	} else {
		engine->emit_flush = gen4_emit_flush_vcs;
		if (GRAPHICS_VER(i915) == 5)
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}
}
static void setup_bcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (GRAPHICS_VER(i915) == 6)
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
	else
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}
static void setup_vecs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(GRAPHICS_VER(i915) < 7);

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_irq_enable_vecs;
	engine->irq_disable = hsw_irq_disable_vecs;

	engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}
static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
				    struct i915_vma * const vma)
{
	return gen7_setup_clear_gpr_bb(engine, vma);
}
static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine,
				   struct i915_gem_ww_ctx *ww,
				   struct i915_vma *vma)
{
	int err;

	err = i915_vma_pin_ww(vma, ww, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		return err;

	err = i915_vma_sync(vma);
	if (err)
		goto err_unpin;

	err = gen7_ctx_switch_bb_setup(engine, vma);
	if (err)
		goto err_unpin;

	engine->wa_ctx.vma = vma;
	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}
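
/*
 * Allocate the buffer (plus a dummy context used to track ownership) for the
 * gen7 clear-residuals workaround batch. Returns NULL when the workaround is
 * not required for this engine.
 */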
static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int size, err;

	if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
		return NULL;

	err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
	if (err < 0)
		return ERR_PTR(err);
	if (!err)
		return NULL;

	size = ALIGN(err, PAGE_SIZE);

	obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return ERR_CAST(vma);
	}

	vma->private = intel_context_create(engine); /* dummy residuals */
	if (IS_ERR(vma->private)) {
		err = PTR_ERR(vma->private);
		vma->private = NULL;
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	return vma;
}
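
/*
 * Build the legacy ring submission backend for an engine: per-class vfuncs,
 * a single shared ring and timeline, and the optional gen7 workaround batch,
 * with the pinning done under a ww transaction.
 */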
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
	struct i915_gem_ww_ctx ww;
	struct intel_timeline *timeline;
	struct intel_ring *ring;
	struct i915_vma *gen7_wa_vma;
	int err;

	setup_common(engine);
	switch (engine->class) {
	case RENDER_CLASS:
		setup_rcs(engine);
		break;
	case VIDEO_DECODE_CLASS:
		setup_vcs(engine);
		break;
	case COPY_ENGINE_CLASS:
		setup_bcs(engine);
		break;
	case VIDEO_ENHANCEMENT_CLASS:
		setup_vecs(engine);
		break;
	default:
		MISSING_CASE(engine->class);
		return -ENODEV;
	}
1327 timeline = intel_timeline_create_from_engine(engine,
1328 I915_GEM_HWS_SEQNO_ADDR);
1329 if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}
	GEM_BUG_ON(timeline->has_initial_breadcrumb);
	ring = intel_engine_create_ring(engine, SZ_16K);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err_timeline;
	}

	GEM_BUG_ON(engine->legacy.ring);
1342 engine->legacy.ring = ring;
1343 engine->legacy.timeline = timeline;
1345 gen7_wa_vma = gen7_ctx_vma(engine);
1346 if (IS_ERR(gen7_wa_vma)) {
		err = PTR_ERR(gen7_wa_vma);
		goto err_ring;
	}

	i915_gem_ww_ctx_init(&ww, false);
retry:
	err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
1355 if (!err && gen7_wa_vma)
1356 err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
1357 if (!err && engine->legacy.ring->vma->obj)
1358 err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
	if (!err)
		err = intel_timeline_pin(timeline, &ww);
	if (!err) {
		err = intel_ring_pin(ring, &ww);
		if (err)
			intel_timeline_unpin(timeline);
	}
	if (err)
		goto out;

	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
	if (gen7_wa_vma) {
		err = gen7_ctx_switch_bb_init(engine, &ww, gen7_wa_vma);
		if (err) {
			intel_ring_unpin(ring);
			intel_timeline_unpin(timeline);
		}
	}

out:
1380 if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	if (err)
		goto err_gen7_put;

	/* Finally, take ownership and responsibility for cleanup! */
	engine->release = ring_release;

	return 0;

err_gen7_put:
	if (gen7_wa_vma) {
		intel_context_put(gen7_wa_vma->private);
		i915_gem_object_put(gen7_wa_vma->obj);
	}
err_ring:
	intel_ring_put(ring);
err_timeline:
	intel_timeline_put(timeline);
err:
	intel_engine_cleanup_common(engine);
	return err;
}
1408 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring_submission.c"
#endif