// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "debugfs_gt.h"

#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_migrate.h"
#include "intel_mocs.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_uncore.h"
#include "shmem_utils.h"
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
	gt->i915 = i915;
	gt->uncore = &i915->uncore;

	spin_lock_init(&gt->irq_lock);

	INIT_LIST_HEAD(&gt->closed_vma);
	spin_lock_init(&gt->closed_lock);

	init_llist_head(&gt->watchdog.list);
	INIT_WORK(&gt->watchdog.work, intel_gt_watchdog_work);

	intel_gt_init_buffer_pool(gt);
	intel_gt_init_reset(gt);
	intel_gt_init_requests(gt);
	intel_gt_init_timelines(gt);
	intel_gt_pm_init_early(gt);

	intel_rps_init_early(&gt->rps);
	intel_uc_init_early(&gt->uc);
}
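
/*
 * Probe for device local memory (lmem). If real lmem is not available we
 * may fall back to a fake region for testing; -ENODEV is not fatal and
 * simply leaves the system without a local memory region.
 */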
int intel_gt_probe_lmem(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_memory_region *mem;
	int id;
	int err;

	mem = intel_gt_setup_lmem(gt);
	if (mem == ERR_PTR(-ENODEV))
		mem = intel_gt_setup_fake_lmem(gt);
	if (IS_ERR(mem)) {
		err = PTR_ERR(mem);
		if (err == -ENODEV)
			return 0;

		drm_err(&i915->drm,
			"Failed to setup region(%d) type=%d\n",
			err, INTEL_MEMORY_LOCAL);
		return err;
	}

	id = INTEL_REGION_LMEM;

	mem->id = id;

	intel_memory_region_set_name(mem, "local%u", mem->instance);

	GEM_BUG_ON(!HAS_REGION(i915, id));
	GEM_BUG_ON(i915->mm.regions[id]);
	i915->mm.regions[id] = mem;

	return 0;
}
void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
{
	gt->ggtt = ggtt;
}

static const struct intel_mmio_range icl_l3bank_steering_table[] = {
	{ 0x00B100, 0x00B3FF },
	{},
};
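
/*
 * First pass over the GT MMIO space: sample clock frequencies, fuses and
 * topology, and install the multicast register steering table used on
 * graphics version 11+ parts.
 */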
int intel_gt_init_mmio(struct intel_gt *gt)
{
	intel_gt_init_clock_frequency(gt);

	intel_uc_init_mmio(&gt->uc);
	intel_sseu_info_init(gt);

	if (GRAPHICS_VER(gt->i915) >= 11) {
		gt->steering_table[L3BANK] = icl_l3bank_steering_table;
		gt->info.l3bank_mask =
			~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
			GEN10_L3BANK_MASK;
	}

	return intel_engines_init_mmio(gt);
}
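
/*
 * Helpers to quiesce the legacy ring registers of rings we do not use;
 * see the comment in intel_gt_init_hw() for why this matters on old
 * platforms.
 */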
static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, RING_CTL(base), 0);
	intel_uncore_write(uncore, RING_HEAD(base), 0);
	intel_uncore_write(uncore, RING_TAIL(base), 0);
	intel_uncore_write(uncore, RING_START(base), 0);
}
static void init_unused_rings(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_I830(i915)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
		init_unused_ring(gt, SRB2_BASE);
		init_unused_ring(gt, SRB3_BASE);
	} else if (GRAPHICS_VER(i915) == 2) {
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
	} else if (GRAPHICS_VER(i915) == 3) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, PRB2_BASE);
	}
}
int intel_gt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	gt->last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(i915) && GRAPHICS_VER(i915) < 9)
		intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));

	if (IS_HASWELL(i915))
		intel_uncore_write(uncore,
				   MI_PREDICATE_RESULT_2,
				   IS_HSW_GT3(i915) ?
				   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(gt);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(gt, "init");

	intel_gt_init_swizzling(gt);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Makes sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(gt);

	ret = i915_ppgtt_init_hw(gt);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(&gt->uc);
	if (ret) {
		i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init(gt);

out:
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
	return ret;
}
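
/* Small wrappers around intel_uncore_rmw() for setting and clearing bits. */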
static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw(uncore, reg, clr, 0);
}

static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
	intel_uncore_rmw(uncore, reg, 0, 0);
}

static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
{
	GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
	GEN6_RING_FAULT_REG_POSTING_READ(engine);
}
static void
intel_gt_clear_error_registers(struct intel_gt *gt,
			       intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 eir;

	if (GRAPHICS_VER(i915) != 2)
		clear_register(uncore, PGTBL_ER);

	if (GRAPHICS_VER(i915) < 4)
		clear_register(uncore, IPEIR(RENDER_RING_BASE));
	else
		clear_register(uncore, IPEIR_I965);

	clear_register(uncore, EIR);
	eir = intel_uncore_read(uncore, EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
		rmw_set(uncore, EMR, eir);
		intel_uncore_write(uncore, GEN2_IIR,
				   I915_MASTER_ERROR_INTERRUPT);
	}

	if (GRAPHICS_VER(i915) >= 12) {
		rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 8) {
		rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
		intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
	} else if (GRAPHICS_VER(i915) >= 6) {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		for_each_engine_masked(engine, gt, engine_mask, id)
			gen8_clear_engine_error_register(engine);
	}
}
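
/*
 * Decode and log any pending GPU page-fault information left in the
 * per-engine (gen6-7) or global (gen8+) fault registers.
 */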
static void gen6_check_faults(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 fault;

	for_each_engine(engine, gt, id) {
		fault = GEN6_RING_FAULT_REG_READ(engine);
		if (fault & RING_FAULT_VALID) {
			drm_dbg(&engine->i915->drm, "Unexpected fault\n"
				"\tAddr: 0x%08lx\n"
				"\tAddress space: %s\n"
				"\tSource ID: %d\n"
				"\tType: %d\n",
				fault & PAGE_MASK,
				fault & RING_FAULT_GTTSEL_MASK ?
				"GGTT" : "PPGTT",
				RING_FAULT_SRCID(fault),
				RING_FAULT_FAULT_TYPE(fault));
		}
	}
}
static void gen8_check_faults(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
	u32 fault;

	if (GRAPHICS_VER(gt->i915) >= 12) {
		fault_reg = GEN12_RING_FAULT_REG;
		fault_data0_reg = GEN12_FAULT_TLB_DATA0;
		fault_data1_reg = GEN12_FAULT_TLB_DATA1;
	} else {
		fault_reg = GEN8_RING_FAULT_REG;
		fault_data0_reg = GEN8_FAULT_TLB_DATA0;
		fault_data1_reg = GEN8_FAULT_TLB_DATA1;
	}

	fault = intel_uncore_read(uncore, fault_reg);
	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
		fault_data1 = intel_uncore_read(uncore, fault_data1_reg);

		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
			"\tAddr: 0x%08x_%08x\n"
			"\tAddress space: %s\n"
			"\tEngine ID: %d\n"
			"\tSource ID: %d\n"
			"\tType: %d\n",
			upper_32_bits(fault_addr), lower_32_bits(fault_addr),
			fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
			GEN8_RING_FAULT_ENGINE_ID(fault),
			RING_FAULT_SRCID(fault),
			RING_FAULT_FAULT_TYPE(fault));
	}
}
void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
	if (GRAPHICS_VER(i915) >= 8)
		gen8_check_faults(gt);
	else if (GRAPHICS_VER(i915) >= 6)
		gen6_check_faults(gt);
	else
		return;

	intel_gt_clear_error_registers(gt, ALL_ENGINES);
}
void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * became discrete).
	 */

	wmb();

	if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
		return;

	intel_gt_chipset_flush(gt);

	with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
		unsigned long flags;

		spin_lock_irqsave(&uncore->lock, flags);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irqrestore(&uncore->lock, flags);
	}
}
void intel_gt_chipset_flush(struct intel_gt *gt)
{
	wmb();
	if (GRAPHICS_VER(gt->i915) < 6)
		intel_gtt_chipset_flush();
}
void intel_gt_driver_register(struct intel_gt *gt)
{
	intel_rps_driver_register(&gt->rps);

	debugfs_gt_register(gt);
}
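
/*
 * Allocate the global scratch page, preferring device local memory, then
 * stolen memory, then a regular internal object, and pin it high in the
 * GGTT for the lifetime of the GT.
 */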
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(obj))
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		drm_err(&i915->drm, "Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
	if (ret)
		goto err_unref;

	gt->scratch = i915_vma_make_unshrinkable(vma);

	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void intel_gt_fini_scratch(struct intel_gt *gt)
{
	i915_vma_unpin_and_release(&gt->scratch, 0);
}
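
/*
 * Pick the address space used for the kernel context: a full ppGTT when
 * the platform supports more than aliasing ppGTT, otherwise the GGTT.
 */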
static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
	if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
		return &i915_ppgtt_create(gt)->vm;
	else
		return i915_vm_get(&gt->ggtt->vm);
}
static int __engines_record_defaults(struct intel_gt *gt)
{
	struct i915_request *requests[I915_NUM_ENGINES] = {};
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the gpu during very early sanitisation, the current
	 * register state on the GPU should reflect its defaults values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	for_each_engine(engine, gt, id) {
		struct intel_renderstate so;
		struct intel_context *ce;
		struct i915_request *rq;

		/* We must be able to switch to something! */
		GEM_BUG_ON(!engine->kernel_context);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			goto out;
		}

		err = intel_renderstate_init(&so, ce);
		if (err)
			goto err;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_fini;
		}

		err = intel_engine_emit_ctx_wa(rq);
		if (err)
			goto err_rq;

		err = intel_renderstate_emit(&so, rq);
		if (err)
			goto err_rq;

err_rq:
		requests[id] = i915_request_get(rq);
		i915_request_add(rq);
err_fini:
		intel_renderstate_fini(&so, ce);
err:
		if (err) {
			intel_context_put(ce);
			goto out;
		}
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
		err = -EIO;
		goto out;
	}

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct i915_request *rq;
		struct file *state;

		rq = requests[id];
		if (!rq)
			continue;

		if (rq->fence.error) {
			err = -EIO;
			goto out;
		}

		GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
		if (!rq->context->state)
			continue;

		/* Keep a copy of the state's backing pages; free the obj */
		state = shmem_create_from_object(rq->context->state->obj);
		if (IS_ERR(state)) {
			err = PTR_ERR(state);
			goto out;
		}
		rq->engine->default_state = state;
	}

out:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	if (err)
		intel_gt_set_wedged(gt);

	for (id = 0; id < ARRAY_SIZE(requests); id++) {
		struct intel_context *ce;
		struct i915_request *rq;

		rq = requests[id];
		if (!rq)
			continue;

		ce = rq->context;
		i915_request_put(rq);
		intel_context_put(ce);
	}
	return err;
}
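
/*
 * Re-check the per-engine workaround lists once the first requests have
 * run; only enabled on CONFIG_DRM_I915_DEBUG_GEM builds.
 */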
static int __engines_verify_workarounds(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, gt, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	/* Flush and restore the kernel context for safety */
	if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
		err = -EIO;

	return err;
}
static void __intel_gt_disable(struct intel_gt *gt)
{
	intel_gt_set_wedged_on_fini(gt);

	intel_gt_suspend_prepare(gt);
	intel_gt_suspend_late(gt);

	GEM_BUG_ON(intel_gt_pm_is_awake(gt));
}
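
/*
 * Bring up the GT: scratch, power management, engines and uC firmware,
 * then record the default context image and verify the workarounds. On
 * failure the GT is wedged so that it is left in a safe, idle state.
 */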
int intel_gt_init(struct intel_gt *gt)
{
	int err;

	err = i915_inject_probe_error(gt->i915, -ENODEV);
	if (err)
		return err;

	/*
	 * This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	err = intel_gt_init_scratch(gt,
				    GRAPHICS_VER(gt->i915) == 2 ? SZ_256K : SZ_4K);
	if (err)
		goto out_fw;

	intel_gt_pm_init(gt);

	gt->vm = kernel_vm(gt);
	if (!gt->vm) {
		err = -ENOMEM;
		goto err_pm;
	}

	err = intel_engines_init(gt);
	if (err)
		goto err_engines;

	err = intel_uc_init(&gt->uc);
	if (err)
		goto err_engines;

	err = intel_gt_resume(gt);
	if (err)
		goto err_uc_init;

	err = __engines_record_defaults(gt);
	if (err)
		goto err_gt;

	err = __engines_verify_workarounds(gt);
	if (err)
		goto err_gt;

	err = i915_inject_probe_error(gt->i915, -EIO);
	if (err)
		goto err_gt;

	intel_migrate_init(&gt->migrate, gt);

	goto out_fw;
err_gt:
	__intel_gt_disable(gt);
	intel_uc_fini_hw(&gt->uc);
err_uc_init:
	intel_uc_fini(&gt->uc);
err_engines:
	intel_engines_release(gt);
	i915_vm_put(fetch_and_zero(&gt->vm));
err_pm:
	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
out_fw:
	if (err)
		intel_gt_set_wedged_on_init(gt);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	return err;
}
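
/*
 * Driver teardown helpers: driver_remove disables the GT and releases the
 * engines, driver_unregister wedges and scrubs the HW state,
 * driver_release drops the remaining VM/scratch/pool state, and
 * driver_late_release frees what is left once RCU grace periods complete.
 */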
void intel_gt_driver_remove(struct intel_gt *gt)
{
	__intel_gt_disable(gt);

	intel_migrate_fini(&gt->migrate);
	intel_uc_driver_remove(&gt->uc);

	intel_engines_release(gt);
}
void intel_gt_driver_unregister(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	intel_rps_driver_unregister(&gt->rps);

	/*
	 * Upon unregistering the device to prevent any new users, cancel
	 * all in-flight requests so that we can quickly unbind the active
	 * resources.
	 */
	intel_gt_set_wedged(gt);

	/* Scrub all HW state upon release */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
		__intel_gt_reset(gt, ALL_ENGINES);
}
void intel_gt_driver_release(struct intel_gt *gt)
{
	struct i915_address_space *vm;

	vm = fetch_and_zero(&gt->vm);
	if (vm) /* FIXME being called twice on error paths :( */
		i915_vm_put(vm);

	intel_gt_pm_fini(gt);
	intel_gt_fini_scratch(gt);
	intel_gt_fini_buffer_pool(gt);
}
void intel_gt_driver_late_release(struct intel_gt *gt)
{
	/* We need to wait for inflight RCU frees to release their grip */
	rcu_barrier();

	intel_uc_driver_late_release(&gt->uc);
	intel_gt_fini_requests(gt);
	intel_gt_fini_reset(gt);
	intel_gt_fini_timelines(gt);
	intel_engines_free(gt);
}
/**
 * intel_gt_reg_needs_read_steering - determine whether a register read
 *     requires explicit steering
 * @gt: GT structure
 * @reg: the register to check steering requirements for
 * @type: type of multicast steering to check
 *
 * Determines whether @reg needs explicit steering of a specific type for
 * reads.
 *
 * Returns false if @reg does not belong to a register range of the given
 * steering type, or if the default (subslice-based) steering IDs are suitable
 * for @type steering too.
 */
static bool intel_gt_reg_needs_read_steering(struct intel_gt *gt,
					     i915_reg_t reg,
					     enum intel_steering_type type)
{
	const u32 offset = i915_mmio_reg_offset(reg);
	const struct intel_mmio_range *entry;

	if (likely(!intel_gt_needs_read_steering(gt, type)))
		return false;

	for (entry = gt->steering_table[type]; entry->end; entry++) {
		if (offset >= entry->start && offset <= entry->end)
			return true;
	}

	return false;
}
/**
 * intel_gt_get_valid_steering - determines valid IDs for a class of MCR steering
 * @gt: GT structure
 * @type: multicast register type
 * @sliceid: Slice ID returned
 * @subsliceid: Subslice ID returned
 *
 * Determines sliceid and subsliceid values that will steer reads
 * of a specific multicast register class to a valid value.
 */
static void intel_gt_get_valid_steering(struct intel_gt *gt,
					enum intel_steering_type type,
					u8 *sliceid, u8 *subsliceid)
{
	switch (type) {
	case L3BANK:
		GEM_DEBUG_WARN_ON(!gt->info.l3bank_mask); /* should be impossible! */

		*sliceid = 0;		/* unused */
		*subsliceid = __ffs(gt->info.l3bank_mask);
		break;
	default:
		MISSING_CASE(type);
		*sliceid = 0;
		*subsliceid = 0;
	}
}
/**
 * intel_gt_read_register_fw - reads a GT register with support for multicast
 * @gt: GT structure
 * @reg: register to read
 *
 * This function will read a GT register. If the register is a multicast
 * register, the read will be steered to a valid instance (i.e., one that
 * isn't fused off or powered down by power gating).
 *
 * Returns the value from a valid instance of @reg.
 */
u32 intel_gt_read_register_fw(struct intel_gt *gt, i915_reg_t reg)
{
	int type;
	u8 sliceid, subsliceid;

	for (type = 0; type < NUM_STEERING_TYPES; type++) {
		if (intel_gt_reg_needs_read_steering(gt, reg, type)) {
			intel_gt_get_valid_steering(gt, type, &sliceid,
						    &subsliceid);
			return intel_uncore_read_with_mcr_steering_fw(gt->uncore,
								      reg,
								      sliceid,
								      subsliceid);
		}
	}

	return intel_uncore_read_fw(gt->uncore, reg);
}
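
/*
 * Typical use: a caller that needs a value from a register which may be
 * multicast can call intel_gt_read_register_fw(gt, reg) instead of
 * intel_uncore_read_fw(), and the read is transparently steered to an
 * instance that is not fused off or power gated.
 */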
void intel_gt_info_print(const struct intel_gt_info *info,
			 struct drm_printer *p)
{
	drm_printf(p, "available engines: %x\n", info->engine_mask);

	intel_sseu_dump(&info->sseu, p);
}