1 // SPDX-License-Identifier: MIT
3 * Copyright © 2014 Intel Corporation
6 #include "gen8_engine_cs.h"
9 #include "intel_engine.h"
10 #include "intel_gpu_commands.h"
12 #include "intel_lrc.h"
13 #include "intel_lrc_reg.h"
14 #include "intel_ring.h"
15 #include "shmem_utils.h"
17 static inline unsigned int dword_in_page(void *addr)
19 return offset_in_page(addr) / sizeof(u32);
22 static void set_offsets(u32 *regs,
24 const struct intel_engine_cs *engine,
26 #define NOP(x) (BIT(7) | (x))
27 #define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6)))
29 #define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200))
31 (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \
35 const u32 base = engine->mmio_base;
40 if (*data & BIT(7)) { /* skip */
41 count = *data++ & ~BIT(7);
50 *regs = MI_LOAD_REGISTER_IMM(count);
52 *regs |= MI_LRI_FORCE_POSTED;
53 if (INTEL_GEN(engine->i915) >= 11)
54 *regs |= MI_LRI_LRM_CS_MMIO;
65 offset |= v & ~BIT(7);
68 regs[0] = base + (offset << 2);
74 /* Close the batch; used mainly by live_lrc_layout() */
75 *regs = MI_BATCH_BUFFER_END;
76 if (INTEL_GEN(engine->i915) >= 10)
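/*
 * Editor's illustrative sketch (not part of the driver): a hypothetical
 * offsets table in the byte encoding that set_offsets() consumes.  NOP(n)
 * tells the decoder to skip n dwords of the image, LRI(count, flags)
 * becomes an MI_LOAD_REGISTER_IMM(count) header, and REG()/REG16() encode
 * a register offset relative to engine->mmio_base in one or two bytes.
 * For example, REG16(0x244) emits the bytes { 0x81, 0x11 }; the decoder
 * accumulates (0x01 << 7) | 0x11 = 0x91 and writes base + (0x91 << 2),
 * i.e. base + 0x244, into the image.
 */
#if 0	/* illustrative only; see the real per-gen tables below */
static const u8 example_xcs_offsets[] = {
	NOP(1),		/* leave one dword of the image untouched */
	LRI(2, 0),	/* MI_LOAD_REGISTER_IMM(2), no flags */
	REG16(0x244),	/* two-byte encoding of offset 0x244 */
	REG(0x034),	/* one-byte encoding of offset 0x034 */

	0		/* terminator (assumption for this sketch) */
};
#endif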
81 static const u8 gen8_xcs_offsets[] = {
116 static const u8 gen9_xcs_offsets[] = {
200 static const u8 gen12_xcs_offsets[] = {
232 static const u8 gen8_rcs_offsets[] = {
269 static const u8 gen9_rcs_offsets[] = {
353 static const u8 gen11_rcs_offsets[] = {
394 static const u8 gen12_rcs_offsets[] = {
496 static const u8 *reg_offsets(const struct intel_engine_cs *engine)
499 * The gen12+ lists only have the registers we program in the basic
500 * default state. We rely on the context image using relative
501 * addressing to automatically fix up the register state between the
502 * physical engines for the virtual engine.
504 GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 &&
505 !intel_engine_has_relative_mmio(engine));
507 if (engine->class == RENDER_CLASS) {
508 if (INTEL_GEN(engine->i915) >= 12)
509 return gen12_rcs_offsets;
510 else if (INTEL_GEN(engine->i915) >= 11)
511 return gen11_rcs_offsets;
512 else if (INTEL_GEN(engine->i915) >= 9)
513 return gen9_rcs_offsets;
515 return gen8_rcs_offsets;
517 if (INTEL_GEN(engine->i915) >= 12)
518 return gen12_xcs_offsets;
519 else if (INTEL_GEN(engine->i915) >= 9)
520 return gen9_xcs_offsets;
522 return gen8_xcs_offsets;
526 static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
528 if (INTEL_GEN(engine->i915) >= 12)
530 else if (INTEL_GEN(engine->i915) >= 9)
532 else if (engine->class == RENDER_CLASS)
538 static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
540 if (INTEL_GEN(engine->i915) >= 12)
542 else if (INTEL_GEN(engine->i915) >= 9)
544 else if (engine->class == RENDER_CLASS)
550 static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine)
552 if (INTEL_GEN(engine->i915) >= 12)
554 else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS)
560 static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine)
564 x = lrc_ring_wa_bb_per_ctx(engine);
571 static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
575 x = lrc_ring_indirect_ptr(engine);
582 static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
584 if (engine->class != RENDER_CLASS)
587 if (INTEL_GEN(engine->i915) >= 12)
589 else if (INTEL_GEN(engine->i915) >= 11)
596 lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
598 switch (INTEL_GEN(engine->i915)) {
600 MISSING_CASE(INTEL_GEN(engine->i915));
603 return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
605 return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
607 return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
609 return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
611 return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
616 lrc_setup_indirect_ctx(u32 *regs,
617 const struct intel_engine_cs *engine,
618 u32 ctx_bb_ggtt_addr,
622 GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES));
623 GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1);
624 regs[lrc_ring_indirect_ptr(engine) + 1] =
625 ctx_bb_ggtt_addr | (size / CACHELINE_BYTES);
627 GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1);
628 regs[lrc_ring_indirect_offset(engine) + 1] =
629 lrc_ring_indirect_offset_default(engine) << 6;
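/*
 * Editor's worked example (assumed values): since the GGTT address of the
 * indirect batch is cacheline aligned, its low bits are free to carry the
 * batch size in cachelines.  For a 192-byte batch at GGTT offset 0x10000:
 *
 *	0x10000 | (192 / CACHELINE_BYTES) = 0x10000 | 3 = 0x10003
 *
 * which is why the size must be a multiple of CACHELINE_BYTES (see the
 * GEM_BUG_ON above) and why setup_indirect_ctx_bb() pads its batch with
 * MI_NOOPs up to a cacheline boundary.
 */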
632 static void init_common_regs(u32 * const regs,
633 const struct intel_context *ce,
634 const struct intel_engine_cs *engine,
639 ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
640 ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
642 ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
643 if (INTEL_GEN(engine->i915) < 11)
644 ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
645 CTX_CTRL_RS_CTX_ENABLE);
646 regs[CTX_CONTEXT_CONTROL] = ctl;
648 regs[CTX_TIMESTAMP] = ce->runtime.last;
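/*
 * Editor's note on the _MASKED_BIT_*() helpers used above: masked registers
 * such as CTX_CONTEXT_CONTROL carry a write-enable mask in their upper 16
 * bits, so a write only touches the bits whose mask bit is set.  A minimal
 * sketch of the idea (cf. the driver's _MASKED_BIT_ENABLE() and
 * _MASKED_BIT_DISABLE() definitions); the same idiom appears in
 * __reset_stop_ring() below, where STOP_RING << 16 arms the mask while the
 * value bit stays cleared.
 */
#if 0	/* illustrative only */
#define EXAMPLE_MASKED_ENABLE(bit)	(((bit) << 16) | (bit))	/* set bit */
#define EXAMPLE_MASKED_DISABLE(bit)	((bit) << 16)		/* clear bit */
/* e.g. EXAMPLE_MASKED_ENABLE(BIT(3)) == 0x00080008 */
#endif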
651 static void init_wa_bb_regs(u32 * const regs,
652 const struct intel_engine_cs *engine)
654 const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx;
656 if (wa_ctx->per_ctx.size) {
657 const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
659 GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
660 regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
661 (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
664 if (wa_ctx->indirect_ctx.size) {
665 lrc_setup_indirect_ctx(regs, engine,
666 i915_ggtt_offset(wa_ctx->vma) +
667 wa_ctx->indirect_ctx.offset,
668 wa_ctx->indirect_ctx.size);
672 static void init_ppgtt_regs(u32 *regs, const struct i915_ppgtt *ppgtt)
674 if (i915_vm_is_4lvl(&ppgtt->vm)) {
675 /* 64b PPGTT (48bit canonical)
676 * PDP0_DESCRIPTOR contains the base address to PML4 and
677 * other PDP Descriptors are ignored.
679 ASSIGN_CTX_PML4(ppgtt, regs);
681 ASSIGN_CTX_PDP(ppgtt, regs, 3);
682 ASSIGN_CTX_PDP(ppgtt, regs, 2);
683 ASSIGN_CTX_PDP(ppgtt, regs, 1);
684 ASSIGN_CTX_PDP(ppgtt, regs, 0);
688 static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
690 if (i915_is_ggtt(vm))
691 return i915_vm_to_ggtt(vm)->alias;
693 return i915_vm_to_ppgtt(vm);
696 static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine)
700 x = lrc_ring_mi_mode(engine);
702 regs[x + 1] &= ~STOP_RING;
703 regs[x + 1] |= STOP_RING << 16;
707 static void __lrc_init_regs(u32 *regs,
708 const struct intel_context *ce,
709 const struct intel_engine_cs *engine,
713 * A context is actually a big batch buffer with several
714 * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
715 * values we are setting here are only for the first context restore:
716 * on a subsequent save, the GPU will recreate this batch buffer with new
717 * values (including all the missing MI_LOAD_REGISTER_IMM commands that
718 * we are not initializing here).
720 * Must be kept consistent with virtual_update_register_offsets().
724 memset(regs, 0, PAGE_SIZE);
726 set_offsets(regs, reg_offsets(engine), engine, inhibit);
728 init_common_regs(regs, ce, engine, inhibit);
729 init_ppgtt_regs(regs, vm_alias(ce->vm));
731 init_wa_bb_regs(regs, engine);
733 __reset_stop_ring(regs, engine);
736 void lrc_init_regs(const struct intel_context *ce,
737 const struct intel_engine_cs *engine,
740 __lrc_init_regs(ce->lrc_reg_state, ce, engine, inhibit);
743 void lrc_reset_regs(const struct intel_context *ce,
744 const struct intel_engine_cs *engine)
746 __reset_stop_ring(ce->lrc_reg_state, engine);
750 set_redzone(void *vaddr, const struct intel_engine_cs *engine)
752 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
755 vaddr += engine->context_size;
757 memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
761 check_redzone(const void *vaddr, const struct intel_engine_cs *engine)
763 if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
766 vaddr += engine->context_size;
768 if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
769 drm_err_once(&engine->i915->drm,
770 "%s context redzone overwritten!\n",
774 void lrc_init_state(struct intel_context *ce,
775 struct intel_engine_cs *engine,
780 set_redzone(state, engine);
782 if (engine->default_state) {
783 shmem_read(engine->default_state, 0,
784 state, engine->context_size);
785 __set_bit(CONTEXT_VALID_BIT, &ce->flags);
789 /* Clear the ppHWSP (inc. per-context counters) */
790 memset(state, 0, PAGE_SIZE);
793 * The second page of the context object contains some registers which
794 * must be set up prior to the first execution.
796 __lrc_init_regs(state + LRC_STATE_OFFSET, ce, engine, inhibit);
799 static struct i915_vma *
800 __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
802 struct drm_i915_gem_object *obj;
803 struct i915_vma *vma;
806 context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
808 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
809 context_size += I915_GTT_PAGE_SIZE; /* for redzone */
811 if (INTEL_GEN(engine->i915) == 12) {
812 ce->wa_bb_page = context_size / PAGE_SIZE;
813 context_size += PAGE_SIZE;
816 obj = i915_gem_object_create_shmem(engine->i915, context_size);
818 return ERR_CAST(obj);
820 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
822 i915_gem_object_put(obj);
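/*
 * Editor's sketch of the resulting context object layout, pieced together
 * from the code above (an assumption for orientation, not a normative map):
 *
 *	page 0				per-process HWSP (ppHWSP)
 *	LRC_STATE_OFFSET		register state image (__lrc_init_regs())
 *	... engine->context_size	remainder of the HW context image
 *	+ 1 page			CONTEXT_REDZONE poison (DEBUG_GEM builds)
 *	+ 1 page (ce->wa_bb_page)	per-context indirect WA batch (gen12 only)
 */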
829 static struct intel_timeline *
830 pinned_timeline(struct intel_context *ce, struct intel_engine_cs *engine)
832 struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
834 return intel_timeline_create_from_engine(engine, page_unmask_bits(tl));
837 int lrc_alloc(struct intel_context *ce, struct intel_engine_cs *engine)
839 struct intel_ring *ring;
840 struct i915_vma *vma;
843 GEM_BUG_ON(ce->state);
845 vma = __lrc_alloc_state(ce, engine);
849 ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
855 if (!page_mask_bits(ce->timeline)) {
856 struct intel_timeline *tl;
859 * Use the static global HWSP for the kernel context, and
860 * a dynamically allocated cacheline for everyone else.
862 if (unlikely(ce->timeline))
863 tl = pinned_timeline(ce, engine);
865 tl = intel_timeline_create(engine->gt);
880 intel_ring_put(ring);
886 void lrc_reset(struct intel_context *ce)
888 GEM_BUG_ON(!intel_context_is_pinned(ce));
890 intel_ring_reset(ce->ring, ce->ring->emit);
892 /* Scrub away the garbage */
893 lrc_init_regs(ce, ce->engine, true);
894 ce->lrc.lrca = lrc_update_regs(ce, ce->engine, ce->ring->tail);
898 lrc_pre_pin(struct intel_context *ce,
899 struct intel_engine_cs *engine,
900 struct i915_gem_ww_ctx *ww,
903 GEM_BUG_ON(!ce->state);
904 GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
906 *vaddr = i915_gem_object_pin_map(ce->state->obj,
907 i915_coherent_map_type(ce->engine->i915) |
910 return PTR_ERR_OR_ZERO(*vaddr);
914 lrc_pin(struct intel_context *ce,
915 struct intel_engine_cs *engine,
918 ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
919 ce->lrc.lrca = lrc_update_regs(ce, engine, ce->ring->tail);
923 void lrc_unpin(struct intel_context *ce)
925 check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
929 void lrc_post_unpin(struct intel_context *ce)
931 i915_gem_object_unpin_map(ce->state->obj);
934 void lrc_fini(struct intel_context *ce)
939 intel_ring_put(fetch_and_zero(&ce->ring));
940 i915_vma_put(fetch_and_zero(&ce->state));
943 void lrc_destroy(struct kref *kref)
945 struct intel_context *ce = container_of(kref, typeof(*ce), ref);
947 GEM_BUG_ON(!i915_active_is_idle(&ce->active));
948 GEM_BUG_ON(intel_context_is_pinned(ce));
952 intel_context_fini(ce);
953 intel_context_free(ce);
957 gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs)
959 *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
960 MI_SRM_LRM_GLOBAL_GTT |
962 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
963 *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
964 CTX_TIMESTAMP * sizeof(u32);
967 *cs++ = MI_LOAD_REGISTER_REG |
968 MI_LRR_SOURCE_CS_MMIO |
970 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
971 *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));
973 *cs++ = MI_LOAD_REGISTER_REG |
974 MI_LRR_SOURCE_CS_MMIO |
976 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
977 *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(0));
983 gen12_emit_restore_scratch(const struct intel_context *ce, u32 *cs)
985 GEM_BUG_ON(lrc_ring_gpr0(ce->engine) == -1);
987 *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
988 MI_SRM_LRM_GLOBAL_GTT |
990 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
991 *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
992 (lrc_ring_gpr0(ce->engine) + 1) * sizeof(u32);
999 gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs)
1001 GEM_BUG_ON(lrc_ring_cmd_buf_cctl(ce->engine) == -1);
1003 *cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
1004 MI_SRM_LRM_GLOBAL_GTT |
1006 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
1007 *cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
1008 (lrc_ring_cmd_buf_cctl(ce->engine) + 1) * sizeof(u32);
1011 *cs++ = MI_LOAD_REGISTER_REG |
1012 MI_LRR_SOURCE_CS_MMIO |
1014 *cs++ = i915_mmio_reg_offset(GEN8_RING_CS_GPR(0, 0));
1015 *cs++ = i915_mmio_reg_offset(RING_CMD_BUF_CCTL(0));
1021 gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
1023 cs = gen12_emit_timestamp_wa(ce, cs);
1024 cs = gen12_emit_cmd_buf_wa(ce, cs);
1025 cs = gen12_emit_restore_scratch(ce, cs);
1031 gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
1033 cs = gen12_emit_timestamp_wa(ce, cs);
1034 cs = gen12_emit_restore_scratch(ce, cs);
1039 static inline u32 context_wa_bb_offset(const struct intel_context *ce)
1041 return PAGE_SIZE * ce->wa_bb_page;
1044 static u32 *context_indirect_bb(const struct intel_context *ce)
1048 GEM_BUG_ON(!ce->wa_bb_page);
1050 ptr = ce->lrc_reg_state;
1051 ptr -= LRC_STATE_OFFSET; /* back to start of context image */
1052 ptr += context_wa_bb_offset(ce);
1058 setup_indirect_ctx_bb(const struct intel_context *ce,
1059 const struct intel_engine_cs *engine,
1060 u32 *(*emit)(const struct intel_context *, u32 *))
1062 u32 * const start = context_indirect_bb(ce);
1065 cs = emit(ce, start);
1066 GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
1067 while ((unsigned long)cs % CACHELINE_BYTES)
1070 lrc_setup_indirect_ctx(ce->lrc_reg_state, engine,
1071 i915_ggtt_offset(ce->state) +
1072 context_wa_bb_offset(ce),
1073 (cs - start) * sizeof(*cs));
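/*
 * Editor's worked example (assumed sizes): if emit() wrote 23 dwords, that
 * is 92 bytes; the MI_NOOP padding loop above rounds the batch up to the
 * next cacheline (128 bytes with CACHELINE_BYTES == 64), and
 * lrc_setup_indirect_ctx() then stores 128 / 64 = 2 cachelines in the size
 * field of the INDIRECT_CTX pointer.
 */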
1077 * The context descriptor encodes various attributes of a context,
1078 * including its GTT address and some flags. Because it's fairly
1079 * expensive to calculate, we'll just do it once and cache the result,
1080 * which remains valid until the context is unpinned.
1082 * This is what a descriptor looks like, from LSB to MSB::
1084 * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
1085 * bits 12-31: LRCA, GTT address of (the HWSP of) this context
1086 * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC)
1087 * bits 53-54: mbz, reserved for use by hardware
1088 * bits 55-63: group ID, currently unused and set to 0
1090 * Starting from Gen11, the upper dword of the descriptor has a new format:
1092 * bits 32-36: reserved
1093 * bits 37-47: SW context ID
1094 * bits 48-53: engine instance
1095 * bit 54: mbz, reserved for use by hardware
1096 * bits 55-60: SW counter
1097 * bits 61-63: engine class
1099 * engine info, SW context ID and SW counter need to form a unique number
1100 * (Context ID) per lrc.
1102 static inline u32 lrc_descriptor(const struct intel_context *ce)
1106 desc = INTEL_LEGACY_32B_CONTEXT;
1107 if (i915_vm_is_4lvl(ce->vm))
1108 desc = INTEL_LEGACY_64B_CONTEXT;
1109 desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT;
1111 desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
1112 if (IS_GEN(ce->vm->i915, 8))
1113 desc |= GEN8_CTX_L3LLC_COHERENT;
1115 return i915_ggtt_offset(ce->state) | desc;
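/*
 * Editor's illustrative composition (hypothetical GGTT offset): for a
 * context whose state object sits at GGTT offset 0x00800000 on a 4-level
 * PPGTT, lrc_descriptor() above yields
 *
 *	0x00800000 | GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE |
 *	(INTEL_LEGACY_64B_CONTEXT << GEN8_CTX_ADDRESSING_MODE_SHIFT)
 *
 * i.e. the page-aligned LRCA in bits 12-31 with the GEN8_CTX_* flags packed
 * into the low 12 bits, matching the layout documented above.
 */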
1118 u32 lrc_update_regs(const struct intel_context *ce,
1119 const struct intel_engine_cs *engine,
1122 struct intel_ring *ring = ce->ring;
1123 u32 *regs = ce->lrc_reg_state;
1125 GEM_BUG_ON(!intel_ring_offset_valid(ring, head));
1126 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
1128 regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
1129 regs[CTX_RING_HEAD] = head;
1130 regs[CTX_RING_TAIL] = ring->tail;
1131 regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
1134 if (engine->class == RENDER_CLASS) {
1135 regs[CTX_R_PWR_CLK_STATE] =
1136 intel_sseu_make_rpcs(engine->gt, &ce->sseu);
1138 i915_oa_init_reg_state(ce, engine);
1141 if (ce->wa_bb_page) {
1142 u32 *(*fn)(const struct intel_context *ce, u32 *cs);
1144 fn = gen12_emit_indirect_ctx_xcs;
1145 if (ce->engine->class == RENDER_CLASS)
1146 fn = gen12_emit_indirect_ctx_rcs;
1148 /* Mutually exclusive with the global indirect bb */
1149 GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
1150 setup_indirect_ctx_bb(ce, engine, fn);
1153 return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE;
1156 void lrc_update_offsets(struct intel_context *ce,
1157 struct intel_engine_cs *engine)
1159 set_offsets(ce->lrc_reg_state, reg_offsets(engine), engine, false);
1162 void lrc_check_regs(const struct intel_context *ce,
1163 const struct intel_engine_cs *engine,
1166 const struct intel_ring *ring = ce->ring;
1167 u32 *regs = ce->lrc_reg_state;
1171 if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) {
1172 pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n",
1174 regs[CTX_RING_START],
1175 i915_ggtt_offset(ring->vma));
1176 regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
1180 if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) !=
1181 (RING_CTL_SIZE(ring->size) | RING_VALID)) {
1182 pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n",
1185 (u32)(RING_CTL_SIZE(ring->size) | RING_VALID));
1186 regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
1190 x = lrc_ring_mi_mode(engine);
1191 if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) {
1192 pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n",
1193 engine->name, regs[x + 1]);
1194 regs[x + 1] &= ~STOP_RING;
1195 regs[x + 1] |= STOP_RING << 16;
1199 WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when);
1203 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
1204 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
1205 * but there is a slight complication as this is applied in a WA batch where the
1206 * values are only initialized once so we cannot take register value at the
1207 * beginning and reuse it further; hence we save its value to memory, upload a
1208 * constant value with bit21 set and then we restore it back with the saved value.
1209 * To simplify the WA, a constant value is formed by using the default value
1210 * of this register. This shouldn't be a problem because we are only modifying
1211 * it for a short period and this batch is non-preemptible. We can of course
1212 * use additional instructions that read the actual value of the register
1213 * at that time and set our bit of interest but it makes the WA complicated.
1215 * This WA is also required for Gen9 so extracting as a function avoids
1219 gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
1221 /* NB no one else is allowed to scribble over scratch + 256! */
1222 *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
1223 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1224 *batch++ = intel_gt_scratch_offset(engine->gt,
1225 INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
1228 *batch++ = MI_LOAD_REGISTER_IMM(1);
1229 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1230 *batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;
1232 batch = gen8_emit_pipe_control(batch,
1233 PIPE_CONTROL_CS_STALL |
1234 PIPE_CONTROL_DC_FLUSH_ENABLE,
1237 *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
1238 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1239 *batch++ = intel_gt_scratch_offset(engine->gt,
1240 INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA);
1247 * Typically we only have one indirect_ctx and per_ctx batch buffer, which are
1248 * initialized at the beginning and shared across all contexts, but this field
1249 * lets us have multiple batches at different offsets and select them based
1250 * on a criterion. At the moment this batch always starts at the beginning of the page
1251 * and at this point we don't have multiple wa_ctx batch buffers.
1253 * The number of WAs applied is not known at the beginning; we use this field
1254 * to return the number of DWORDs written.
1256 * Note that this batch does not contain MI_BATCH_BUFFER_END,
1257 * so NOOPs are added as padding to make it cacheline aligned.
1258 * MI_BATCH_BUFFER_END will be added to the per-ctx batch, and together the two
1259 * make a complete batch buffer.
1261 static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
1263 /* WaDisableCtxRestoreArbitration:bdw,chv */
1264 *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1266 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1267 if (IS_BROADWELL(engine->i915))
1268 batch = gen8_emit_flush_coherentl3_wa(engine, batch);
1270 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1271 /* Actual scratch location is at 128 bytes offset */
1272 batch = gen8_emit_pipe_control(batch,
1273 PIPE_CONTROL_FLUSH_L3 |
1274 PIPE_CONTROL_STORE_DATA_INDEX |
1275 PIPE_CONTROL_CS_STALL |
1276 PIPE_CONTROL_QW_WRITE,
1277 LRC_PPHWSP_SCRATCH_ADDR);
1279 *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1281 /* Pad to end of cacheline */
1282 while ((unsigned long)batch % CACHELINE_BYTES)
1286 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
1287 * execution depends on the length specified in terms of cache lines
1288 * in the register CTX_RCS_INDIRECT_CTX
1299 static u32 *emit_lri(u32 *batch, const struct lri *lri, unsigned int count)
1301 GEM_BUG_ON(!count || count > 63);
1303 *batch++ = MI_LOAD_REGISTER_IMM(count);
1305 *batch++ = i915_mmio_reg_offset(lri->reg);
1306 *batch++ = lri->value;
1307 } while (lri++, --count);
1313 static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
1315 static const struct lri lri[] = {
1316 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
1318 COMMON_SLICE_CHICKEN2,
1319 __MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE,
1326 __MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX,
1327 FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
1333 __MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX,
1334 _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
1338 *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1340 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
1341 batch = gen8_emit_flush_coherentl3_wa(engine, batch);
1343 /* WaClearSlmSpaceAtContextSwitch:skl,bxt,kbl,glk,cfl */
1344 batch = gen8_emit_pipe_control(batch,
1345 PIPE_CONTROL_FLUSH_L3 |
1346 PIPE_CONTROL_STORE_DATA_INDEX |
1347 PIPE_CONTROL_CS_STALL |
1348 PIPE_CONTROL_QW_WRITE,
1349 LRC_PPHWSP_SCRATCH_ADDR);
1351 batch = emit_lri(batch, lri, ARRAY_SIZE(lri));
1353 /* WaMediaPoolStateCmdInWABB:bxt,glk */
1354 if (HAS_POOLED_EU(engine->i915)) {
1356 * EU pool configuration is set up along with the golden context
1357 * during context initialization. This value depends on the
1358 * device type (2x6 or 3x6) and needs to be updated based
1359 * on which subslice is disabled, especially for 2x6
1360 * devices; however, it is safe to load the default
1361 * configuration of a 3x6 device instead of masking off
1362 * the corresponding bits because the HW ignores bits of a disabled
1363 * subslice and drops down to the appropriate config. Please
1364 * see render_state_setup() in i915_gem_render_state.c for
1365 * possible configurations; to avoid duplication they are
1366 * not shown here again.
1368 *batch++ = GEN9_MEDIA_POOL_STATE;
1369 *batch++ = GEN9_MEDIA_POOL_ENABLE;
1370 *batch++ = 0x00777000;
1376 *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1378 /* Pad to end of cacheline */
1379 while ((unsigned long)batch % CACHELINE_BYTES)
1386 gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
1391 * WaPipeControlBefore3DStateSamplePattern: cnl
1393 * Ensure the engine is idle prior to programming a
1394 * 3DSTATE_SAMPLE_PATTERN during a context restore.
1396 batch = gen8_emit_pipe_control(batch,
1397 PIPE_CONTROL_CS_STALL,
1400 * WaPipeControlBefore3DStateSamplePattern says we need 4 dwords for
1401 * the PIPE_CONTROL followed by 12 dwords of 0x0, so 16 dwords in
1402 * total. However, a PIPE_CONTROL is 6 dwords long, not 4, which is
1403 * confusing. Since gen8_emit_pipe_control() already advances the
1404 * batch by 6 dwords, we advance the other 10 here, completing a
1405 * cacheline. It's not clear if the workaround requires this padding
1406 * before other commands, or if it's just the regular padding we would
1407 * already have for the workaround bb, so leave it here for now.
1409 for (i = 0; i < 10; i++)
1412 /* Pad to end of cacheline */
1413 while ((unsigned long)batch % CACHELINE_BYTES)
1419 #define CTX_WA_BB_SIZE (PAGE_SIZE)
1421 static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
1423 struct drm_i915_gem_object *obj;
1424 struct i915_vma *vma;
1427 obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_SIZE);
1429 return PTR_ERR(obj);
1431 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
1437 err = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
1441 engine->wa_ctx.vma = vma;
1445 i915_gem_object_put(obj);
1449 void lrc_fini_wa_ctx(struct intel_engine_cs *engine)
1451 i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
1454 typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
1456 int lrc_init_wa_ctx(struct intel_engine_cs *engine)
1458 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1459 struct i915_wa_ctx_bb *wa_bb[] = {
1460 &wa_ctx->indirect_ctx, &wa_ctx->per_ctx
1462 wa_bb_func_t wa_bb_fn[ARRAY_SIZE(wa_bb)];
1463 void *batch, *batch_ptr;
1467 if (engine->class != RENDER_CLASS)
1470 switch (INTEL_GEN(engine->i915)) {
1475 wa_bb_fn[0] = gen10_init_indirectctx_bb;
1479 wa_bb_fn[0] = gen9_init_indirectctx_bb;
1483 wa_bb_fn[0] = gen8_init_indirectctx_bb;
1487 MISSING_CASE(INTEL_GEN(engine->i915));
1491 ret = lrc_setup_wa_ctx(engine);
1493 drm_dbg(&engine->i915->drm,
1494 "Failed to setup context WA page: %d\n", ret);
1498 batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB);
1501 * Emit the two workaround batch buffers, recording the offset from the
1502 * start of the workaround batch buffer object for each and their
1506 for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
1507 wa_bb[i]->offset = batch_ptr - batch;
1508 if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
1509 CACHELINE_BYTES))) {
1514 batch_ptr = wa_bb_fn[i](engine, batch_ptr);
1515 wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
1517 GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_SIZE);
1519 __i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch);
1520 __i915_gem_object_release_map(wa_ctx->vma->obj);
1522 lrc_fini_wa_ctx(engine);
1527 static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
1529 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1530 ce->runtime.num_underflow++;
1531 ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
1535 void lrc_update_runtime(struct intel_context *ce)
1540 if (intel_context_is_barrier(ce))
1543 old = ce->runtime.last;
1544 ce->runtime.last = lrc_get_runtime(ce);
1545 dt = ce->runtime.last - old;
1547 if (unlikely(dt < 0)) {
1548 CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
1549 old, ce->runtime.last, dt);
1550 st_update_runtime_underflow(ce, dt);
1554 ewma_runtime_add(&ce->runtime.avg, dt);
1555 ce->runtime.total += dt;
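/*
 * Editor's worked example (assumed values): the saved CTX_TIMESTAMP is a
 * 32-bit counter and dt is computed as an s32, so the accounting stays
 * correct across a counter wrap: old = 0xfffffff0, new = 0x00000010 gives
 * dt = (s32)0x20 = 32 ticks.  Only a sample that appears to move backwards
 * (new behind old by less than 2^31 ticks) takes the negative-dt underflow
 * path traced above.
 */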
1558 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1559 #include "selftest_lrc.c"