// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014 Intel Corporation
 */

#include "gen8_engine_cs.h"
#include "i915_drv.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_lrc.h"
#include "intel_ring.h"
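/*
 * Flush/invalidate the render engine caches with a PIPE_CONTROL, including
 * the Gen9 null-PIPE_CONTROL-before-VF-invalidate and KBL GAM workarounds
 * applied below.
 */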
int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
	bool vf_flush_wa = false, dc_flush_wa = false;
	u32 *cs, flags = 0;
	int len;

	flags |= PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}

	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_STORE_DATA_INDEX;

		/*
		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
		 * pipe control.
		 */
		if (GRAPHICS_VER(rq->i915) == 9)
			vf_flush_wa = true;

		/* WaForGAMHang:kbl */
		if (IS_KABYLAKE(rq->i915) && IS_GRAPHICS_STEP(rq->i915, 0, STEP_C0))
			dc_flush_wa = true;
	}

	len = 6;
	if (vf_flush_wa)
		len += 6;
	if (dc_flush_wa)
		len += 12;

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (vf_flush_wa)
		cs = gen8_emit_pipe_control(cs, 0, 0);

	if (dc_flush_wa)
		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
					    0);

	cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);

	if (dc_flush_wa)
		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);

	intel_ring_advance(rq, cs);

	return 0;
}
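/*
 * The non-render engines have no PIPE_CONTROL; they flush with MI_FLUSH_DW
 * instead, with the video decode engines additionally setting
 * MI_INVALIDATE_BSD when invalidating.
 */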
int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW + 1;

	/*
	 * We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_INVALIDATE_TLB;
		if (rq->engine->class == VIDEO_DECODE_CLASS)
			cmd |= MI_INVALIDATE_BSD;
	}

	*cs++ = cmd;
	*cs++ = LRC_PPHWSP_SCRATCH_ADDR;
	*cs++ = 0; /* upper addr */
	*cs++ = 0; /* value */
	intel_ring_advance(rq, cs);

	return 0;
}
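/*
 * Gen11 render flush: the flush and the invalidation are emitted as two
 * separate PIPE_CONTROLs, each with its own post-sync QW write into the
 * per-context PPHWSP scratch.
 */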
int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
	if (mode & EMIT_FLUSH) {
		u32 *cs;
		u32 flags = 0;

		flags |= PIPE_CONTROL_CS_STALL;

		flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_STORE_DATA_INDEX;

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	if (mode & EMIT_INVALIDATE) {
		u32 *cs;
		u32 flags = 0;

		flags |= PIPE_CONTROL_CS_STALL;

		flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_STORE_DATA_INDEX;

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	return 0;
}
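/*
 * On Gen12+ MI_ARB_CHECK doubles as the command pre-parser control: bit 0
 * carries the requested pre-parser-disable state, with bit 8 acting as its
 * mask/write-enable.
 */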
static u32 preparser_disable(bool state)
{
	return MI_ARB_CHECK | 1 << 8 | state;
}
static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine)
{
	switch (engine->id) {
	case RCS0:
		return GEN12_CCS_AUX_INV;
	case BCS0:
		return GEN12_BCS0_AUX_INV;
	case VCS0:
		return GEN12_VD0_AUX_INV;
	case VCS2:
		return GEN12_VD2_AUX_INV;
	case VECS0:
		return GEN12_VE0_AUX_INV;
	case CCS0:
		return GEN12_CCS0_AUX_INV;
	default:
		return INVALID_MMIO_REG;
	}
}
static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine)
{
	i915_reg_t reg = gen12_get_aux_inv_reg(engine);

	if (IS_PONTEVECCHIO(engine->i915))
		return false;

	/*
	 * So far platforms supported by i915 having flat ccs do not require
	 * AUX invalidation. Check also whether the engine requires it.
	 */
	return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915);
}
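/*
 * Emit an AUX table invalidation: an LRI that sets the invalidate bit in the
 * engine's AUX_INV register, followed by a register-poll semaphore wait for
 * that bit to read back as zero again, so the invalidation has completed
 * before any later command executes.
 */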
u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
{
	i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine);
	u32 gsi_offset = engine->gt->uncore->gsi_offset;

	if (!gen12_needs_ccs_aux_inv(engine))
		return cs;

	*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
	*cs++ = AUX_INV;

	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
		MI_SEMAPHORE_REGISTER_POLL |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
	*cs++ = 0;
	*cs++ = 0;

	return cs;
}
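/*
 * Some early MTL steppings need a dummy PIPE_CONTROL with a depth cache
 * flush emitted ahead of the real flush/invalidation; on every other
 * platform this helper is a no-op.
 */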
static int mtl_dummy_pipe_control(struct i915_request *rq)
{
	if (IS_MTL_GRAPHICS_STEP(rq->i915, M, STEP_A0, STEP_B0) ||
	    IS_MTL_GRAPHICS_STEP(rq->i915, P, STEP_A0, STEP_B0)) {
		u32 *cs;

		/* dummy PIPE_CONTROL + depth flush */
		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);
		cs = gen12_emit_pipe_control(cs,
					     0,
					     PIPE_CONTROL_DEPTH_CACHE_FLUSH,
					     LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	return 0;
}
int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
	struct intel_engine_cs *engine = rq->engine;

	/*
	 * On Aux CCS platforms the invalidation of the Aux
	 * table requires quiescing memory traffic beforehand
	 */
	if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) {
		u32 bit_group_0 = 0;
		u32 bit_group_1 = 0;
		int err;
		u32 *cs;

		err = mtl_dummy_pipe_control(rq);
		if (err)
			return err;

		bit_group_0 |= PIPE_CONTROL0_HDC_PIPELINE_FLUSH;

		/*
		 * When required, in MTL and beyond platforms we
		 * need to set the CCS_FLUSH bit in the pipe control
		 */
		if (GRAPHICS_VER_FULL(rq->i915) >= IP_VER(12, 70))
			bit_group_0 |= PIPE_CONTROL_CCS_FLUSH;

		bit_group_1 |= PIPE_CONTROL_TILE_CACHE_FLUSH;
		bit_group_1 |= PIPE_CONTROL_FLUSH_L3;
		bit_group_1 |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		bit_group_1 |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/* Wa_1409600907:tgl,adl-p */
		bit_group_1 |= PIPE_CONTROL_DEPTH_STALL;
		bit_group_1 |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		bit_group_1 |= PIPE_CONTROL_FLUSH_ENABLE;

		bit_group_1 |= PIPE_CONTROL_STORE_DATA_INDEX;
		bit_group_1 |= PIPE_CONTROL_QW_WRITE;

		bit_group_1 |= PIPE_CONTROL_CS_STALL;

		if (!HAS_3D_PIPELINE(engine->i915))
			bit_group_1 &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
		else if (engine->class == COMPUTE_CLASS)
			bit_group_1 &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		cs = gen12_emit_pipe_control(cs, bit_group_0, bit_group_1,
					     LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	if (mode & EMIT_INVALIDATE) {
		u32 flags = 0;
		u32 *cs, count;
		int err;

		err = mtl_dummy_pipe_control(rq);
		if (err)
			return err;

		flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
		flags |= PIPE_CONTROL_QW_WRITE;

		flags |= PIPE_CONTROL_CS_STALL;

		if (!HAS_3D_PIPELINE(engine->i915))
			flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
		else if (engine->class == COMPUTE_CLASS)
			flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;

		count = 8;
		if (gen12_needs_ccs_aux_inv(rq->engine))
			count += 8;

		cs = intel_ring_begin(rq, count);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/*
		 * Prevent the pre-parser from skipping past the TLB
		 * invalidate and loading a stale page for the batch
		 * buffer / request payload.
		 */
		*cs++ = preparser_disable(true);

		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);

		cs = gen12_emit_aux_table_inv(engine, cs);

		*cs++ = preparser_disable(false);
		intel_ring_advance(rq, cs);
	}

	return 0;
}
int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
	u32 cmd = 4;
	u32 *cs;

	if (mode & EMIT_INVALIDATE) {
		cmd += 2;

		if (gen12_needs_ccs_aux_inv(rq->engine))
			cmd += 8;
	}

	cs = intel_ring_begin(rq, cmd);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (mode & EMIT_INVALIDATE)
		*cs++ = preparser_disable(true);

	cmd = MI_FLUSH_DW + 1;

	/*
	 * We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_INVALIDATE_TLB;
		if (rq->engine->class == VIDEO_DECODE_CLASS)
			cmd |= MI_INVALIDATE_BSD;

		if (gen12_needs_ccs_aux_inv(rq->engine) &&
		    rq->engine->class == COPY_ENGINE_CLASS)
			cmd |= MI_FLUSH_DW_CCS;
	}

	*cs++ = cmd;
	*cs++ = LRC_PPHWSP_SCRATCH_ADDR;
	*cs++ = 0; /* upper addr */
	*cs++ = 0; /* value */

	cs = gen12_emit_aux_table_inv(rq->engine, cs);

	if (mode & EMIT_INVALIDATE)
		*cs++ = preparser_disable(false);

	intel_ring_advance(rq, cs);

	return 0;
}
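/*
 * Breadcrumb helpers: preempt_address() is the GGTT address of the engine's
 * preemption semaphore in the HWSP, and hwsp_offset() is the GGTT address at
 * which this request's seqno is written.
 */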
static u32 preempt_address(struct intel_engine_cs *engine)
{
	return (i915_ggtt_offset(engine->status_page.vma) +
		I915_GEM_HWS_PREEMPT_ADDR);
}
static u32 hwsp_offset(const struct i915_request *rq)
{
	const struct intel_timeline *tl;

	/* Before the request is executed, the timeline is fixed */
	tl = rcu_dereference_protected(rq->timeline,
				       !i915_request_signaled(rq));

	/* See the comment in i915_request_active_seqno(). */
	return page_mask_bits(tl->hwsp_offset) + offset_in_page(rq->hwsp_seqno);
}
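/*
 * Write the initial breadcrumb (seqno - 1) at the head of the request so
 * that i915_request_started() can tell whether the user payload has begun
 * executing; see the comment above the MI_ARB_CHECK below.
 */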
int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
	u32 *cs;

	GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
	if (!i915_request_timeline(rq)->has_initial_breadcrumb)
		return 0;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = hwsp_offset(rq);
	*cs++ = 0;
	*cs++ = rq->fence.seqno - 1;

	/*
	 * Check if we have been preempted before we even get started.
	 *
	 * After this point i915_request_started() reports true, even if
	 * we get preempted and so are no longer running.
	 *
	 * i915_request_started() is used during preemption processing
	 * to decide if the request is currently inside the user payload
	 * or spinning on a kernel semaphore (or earlier). For no-preemption
	 * requests, we do allow preemption on the semaphore before the user
	 * payload, but do not allow preemption once the request is started.
	 *
	 * i915_request_started() is similarly used during GPU hangs to
	 * determine if the user's payload was guilty, and if so, the
	 * request is banned. Before the request is started, it is assumed
	 * to be unharmed and an innocent victim of another's hang.
	 */
	*cs++ = MI_NOOP;
	*cs++ = MI_ARB_CHECK;

	intel_ring_advance(rq, cs);

	/* Record the updated position of the request's payload */
	rq->infix = intel_ring_offset(rq, cs);

	__set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);

	return 0;
}
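/*
 * XeHP batch buffer start: reload RING_PREDICATE_RESULT from the context's
 * indirect wa_bb page before jumping to the batch, then run the small fixup
 * batch in that page afterwards to clear any stray MI_SET_PREDICATE left
 * behind by the batch (see the comment below).
 */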
static int __xehp_emit_bb_start(struct i915_request *rq,
				u64 offset, u32 len,
				const unsigned int flags,
				u32 arb)
{
	struct intel_context *ce = rq->context;
	u32 wa_offset = lrc_indirect_bb(ce);
	u32 *cs;

	GEM_BUG_ON(!ce->wa_bb_page);

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | arb;

	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
		MI_SRM_LRM_GLOBAL_GTT |
		MI_LRI_LRM_CS_MMIO;
	*cs++ = i915_mmio_reg_offset(RING_PREDICATE_RESULT(0));
	*cs++ = wa_offset + DG2_PREDICATE_RESULT_WA;
	*cs++ = 0;

	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	/* Fixup stray MI_SET_PREDICATE as it prevents us executing the ring */
	*cs++ = MI_BATCH_BUFFER_START_GEN8;
	*cs++ = wa_offset + DG2_PREDICATE_RESULT_BB;
	*cs++ = 0;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	intel_ring_advance(rq, cs);

	return 0;
}
int xehp_emit_bb_start_noarb(struct i915_request *rq,
			     u64 offset, u32 len,
			     const unsigned int flags)
{
	return __xehp_emit_bb_start(rq, offset, len, flags, MI_ARB_DISABLE);
}
int xehp_emit_bb_start(struct i915_request *rq,
		       u64 offset, u32 len,
		       const unsigned int flags)
{
	return __xehp_emit_bb_start(rq, offset, len, flags, MI_ARB_ENABLE);
}
int gen8_emit_bb_start_noarb(struct i915_request *rq,
			     u64 offset, u32 len,
			     const unsigned int flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * WaDisableCtxRestoreArbitration:bdw,chv
	 *
	 * We don't need to perform MI_ARB_ENABLE as often as we do (in
	 * particular all the gen that do not need the w/a at all!), if we
	 * took care to make sure that on every switch into this context
	 * (both ordinary and for preemption) that arbitration was enabled
	 * we would be fine. However, for gen8 there is another w/a that
	 * requires us to not preempt inside GPGPU execution, so we keep
	 * arbitration disabled for gen8 batches. Arbitration will be
	 * re-enabled before we close the request
	 * (engine->emit_fini_breadcrumb).
	 */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	/* FIXME(BDW+): Address space and security selectors. */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	intel_ring_advance(rq, cs);

	return 0;
}
int gen8_emit_bb_start(struct i915_request *rq,
		       u64 offset, u32 len,
		       const unsigned int flags)
{
	u32 *cs;

	if (unlikely(i915_request_has_nopreempt(rq)))
		return gen8_emit_bb_start_noarb(rq, offset, len, flags);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}
static void assert_request_valid(struct i915_request *rq)
{
	struct intel_ring *ring __maybe_unused = rq->ring;

	/* Can we unwind this request without appearing to go forwards? */
	GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
}
/*
 * Reserve space for 2 NOOPs at the end of each request to be
 * used as a workaround for not being allowed to do lite
 * restore with HEAD==TAIL (WaIdleLiteRestore).
 */
static u32 *gen8_emit_wa_tail(struct i915_request *rq, u32 *cs)
{
	/* Ensure there's always at least one preemption point per-request. */
	*cs++ = MI_ARB_CHECK;
	*cs++ = MI_NOOP;
	rq->wa_tail = intel_ring_offset(rq, cs);

	/* Check that entire request is less than half the ring */
	assert_request_valid(rq);

	return cs;
}
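/*
 * Spin on the per-engine preemption semaphore in the HWSP after the
 * breadcrumb; only used by the execlists backend (never under GuC
 * submission) as an end-of-request preemption point.
 */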
static u32 *emit_preempt_busywait(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = preempt_address(rq->engine);
	*cs++ = 0;
	*cs++ = MI_NOOP;

	return cs;
}
static __always_inline u32*
gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_USER_INTERRUPT;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	if (intel_engine_has_semaphores(rq->engine) &&
	    !intel_uc_uses_guc_submission(&rq->engine->gt->uc))
		cs = emit_preempt_busywait(rq, cs);

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return gen8_emit_wa_tail(rq, cs);
}
static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
{
	return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
}
u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
	return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
}
u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
	cs = gen8_emit_pipe_control(cs,
				    PIPE_CONTROL_CS_STALL |
				    PIPE_CONTROL_TLB_INVALIDATE |
				    PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
				    PIPE_CONTROL_DEPTH_CACHE_FLUSH |
				    PIPE_CONTROL_DC_FLUSH_ENABLE,
				    0);

	/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
	cs = gen8_emit_ggtt_write_rcs(cs,
				      rq->fence.seqno,
				      hwsp_offset(rq),
				      PIPE_CONTROL_FLUSH_ENABLE |
				      PIPE_CONTROL_CS_STALL);

	return gen8_emit_fini_breadcrumb_tail(rq, cs);
}
u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
	cs = gen8_emit_pipe_control(cs,
				    PIPE_CONTROL_CS_STALL |
				    PIPE_CONTROL_TLB_INVALIDATE |
				    PIPE_CONTROL_TILE_CACHE_FLUSH |
				    PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
				    PIPE_CONTROL_DEPTH_CACHE_FLUSH |
				    PIPE_CONTROL_DC_FLUSH_ENABLE,
				    0);

	/*XXX: Look at gen8_emit_fini_breadcrumb_rcs */
	cs = gen8_emit_ggtt_write_rcs(cs,
				      rq->fence.seqno,
				      hwsp_offset(rq),
				      PIPE_CONTROL_FLUSH_ENABLE |
				      PIPE_CONTROL_CS_STALL);

	return gen8_emit_fini_breadcrumb_tail(rq, cs);
}
/*
 * Note that the CS instruction pre-parser will not stall on the breadcrumb
 * flush and will continue pre-fetching the instructions after it before the
 * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
 * BB_START/END instructions, so, even though we might pre-fetch the pre-amble
 * of the next request before the memory has been flushed, we're guaranteed that
 * we won't access the batch itself too early.
 * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
 * so, if the current request is modifying an instruction in the next request on
 * the same intel_context, we might pre-fetch and then execute the pre-update
 * instruction. To avoid this, the users of self-modifying code should either
 * disable the parser around the code emitting the memory writes, via a new flag
 * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
 * the in-kernel use-cases we've opted to use a separate context, see
 * reloc_gpu() as an example.
 * All the above applies only to the instructions themselves. Non-inline data
 * used by the instructions is not pre-fetched.
 */
static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = preempt_address(rq->engine);
	*cs++ = 0;
	*cs++ = 0;

	return cs;
}
/* Wa_14014475959:dg2 */
#define CCS_SEMAPHORE_PPHWSP_OFFSET	0x540
static u32 ccs_semaphore_offset(struct i915_request *rq)
{
	return i915_ggtt_offset(rq->context->state) +
		(LRC_PPHWSP_PN * PAGE_SIZE) + CCS_SEMAPHORE_PPHWSP_OFFSET;
}
/* Wa_14014475959:dg2 */
static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs)
{
	int i;

	*cs++ = MI_ATOMIC_INLINE | MI_ATOMIC_GLOBAL_GTT | MI_ATOMIC_CS_STALL |
		MI_ATOMIC_MOVE;
	*cs++ = ccs_semaphore_offset(rq);
	*cs++ = 0;
	*cs++ = 1;

	/*
	 * When MI_ATOMIC_INLINE_DATA set this command must be 11 DW + (1 NOP)
	 * to align. 4 DWs above + 8 filler DWs here.
	 */
	for (i = 0; i < 8; ++i)
		*cs++ = 0;

	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = ccs_semaphore_offset(rq);
	*cs++ = 0;
	*cs++ = 0;

	return cs;
}
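/*
 * Common tail for the gen12 breadcrumbs: user interrupt, re-enable
 * arbitration, the optional preempt/CCS busy-waits, and finally the
 * WaIdleLiteRestore tail.
 */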
static __always_inline u32*
gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_USER_INTERRUPT;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	if (intel_engine_has_semaphores(rq->engine) &&
	    !intel_uc_uses_guc_submission(&rq->engine->gt->uc))
		cs = gen12_emit_preempt_busywait(rq, cs);

	/* Wa_14014475959:dg2 */
	if (intel_engine_uses_wa_hold_ccs_switchout(rq->engine))
		cs = ccs_emit_wa_busywait(rq, cs);

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return gen8_emit_wa_tail(rq, cs);
}
u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
	/* XXX Stalling flush before seqno write; post-sync not */
	cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
	return gen12_emit_fini_breadcrumb_tail(rq, cs);
}
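/*
 * Render/compute breadcrumb for gen12: a full flush + invalidation
 * PIPE_CONTROL followed by the seqno write, with the 3D-only flags masked
 * off on compute-only platforms and engines.
 */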
u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
	struct drm_i915_private *i915 = rq->i915;
	u32 flags = (PIPE_CONTROL_CS_STALL |
		     PIPE_CONTROL_TLB_INVALIDATE |
		     PIPE_CONTROL_TILE_CACHE_FLUSH |
		     PIPE_CONTROL_FLUSH_L3 |
		     PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		     PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		     PIPE_CONTROL_DC_FLUSH_ENABLE |
		     PIPE_CONTROL_FLUSH_ENABLE);

	if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
	    IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
		/* dummy PIPE_CONTROL + depth flush */
		cs = gen12_emit_pipe_control(cs, 0,
					     PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0);

	if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
		/* Wa_1409600907:tgl,adl-p */
		flags |= PIPE_CONTROL_DEPTH_STALL;

	if (!HAS_3D_PIPELINE(rq->i915))
		flags &= ~PIPE_CONTROL_3D_ARCH_FLAGS;
	else if (rq->engine->class == COMPUTE_CLASS)
		flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS;

	cs = gen12_emit_pipe_control(cs, PIPE_CONTROL0_HDC_PIPELINE_FLUSH, flags, 0);

	/*XXX: Look at gen8_emit_fini_breadcrumb_rcs */
	cs = gen12_emit_ggtt_write_rcs(cs,
				       rq->fence.seqno,
				       hwsp_offset(rq),
				       PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
				       PIPE_CONTROL_FLUSH_ENABLE |
				       PIPE_CONTROL_CS_STALL);

	return gen12_emit_fini_breadcrumb_tail(rq, cs);
}