// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "debugfs_gt.h"

#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
#include "intel_context.h"
#include "intel_gt.h"
#include "intel_gt_buffer_pool.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mocs.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_uncore.h"
#include "shmem_utils.h"

void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
        gt->i915 = i915;
        gt->uncore = &i915->uncore;

        spin_lock_init(&gt->irq_lock);

        INIT_LIST_HEAD(&gt->closed_vma);
        spin_lock_init(&gt->closed_lock);

        intel_gt_init_buffer_pool(gt);
        intel_gt_init_reset(gt);
        intel_gt_init_requests(gt);
        intel_gt_init_timelines(gt);
        intel_gt_pm_init_early(gt);

        intel_rps_init_early(&gt->rps);
        intel_uc_init_early(&gt->uc);
}

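/*
 * Probe for device local memory (LMEM). If no real LMEM is exposed by the
 * hardware, fall back to the fake LMEM region used for testing; on success
 * the region is published as INTEL_REGION_LMEM in i915->mm.regions[].
 */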
int intel_gt_probe_lmem(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        struct intel_memory_region *mem;
        int id;
        int err;

        mem = intel_gt_setup_lmem(gt);
        if (mem == ERR_PTR(-ENODEV))
                mem = intel_gt_setup_fake_lmem(gt);
        if (IS_ERR(mem)) {
                err = PTR_ERR(mem);
                if (err == -ENODEV)
                        return 0;

                drm_err(&i915->drm,
                        "Failed to setup region(%d) type=%d\n",
                        err, INTEL_MEMORY_LOCAL);
                return err;
        }

        id = INTEL_REGION_LMEM;

        mem->type = INTEL_MEMORY_LOCAL;

        GEM_BUG_ON(!HAS_REGION(i915, id));
        GEM_BUG_ON(i915->mm.regions[id]);
        i915->mm.regions[id] = mem;

        return 0;
}

void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
{
        gt->ggtt = ggtt;
}

int intel_gt_init_mmio(struct intel_gt *gt)
{
        intel_gt_init_clock_frequency(gt);

        intel_uc_init_mmio(&gt->uc);
        intel_sseu_info_init(gt);

        return intel_engines_init_mmio(gt);
}

static void init_unused_ring(struct intel_gt *gt, u32 base)
{
        struct intel_uncore *uncore = gt->uncore;

        intel_uncore_write(uncore, RING_CTL(base), 0);
        intel_uncore_write(uncore, RING_HEAD(base), 0);
        intel_uncore_write(uncore, RING_TAIL(base), 0);
        intel_uncore_write(uncore, RING_START(base), 0);
}

static void init_unused_rings(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;

        if (IS_I830(i915)) {
                init_unused_ring(gt, PRB1_BASE);
                init_unused_ring(gt, SRB0_BASE);
                init_unused_ring(gt, SRB1_BASE);
                init_unused_ring(gt, SRB2_BASE);
                init_unused_ring(gt, SRB3_BASE);
        } else if (IS_GEN(i915, 2)) {
                init_unused_ring(gt, SRB0_BASE);
                init_unused_ring(gt, SRB1_BASE);
        } else if (IS_GEN(i915, 3)) {
                init_unused_ring(gt, PRB1_BASE);
                init_unused_ring(gt, PRB2_BASE);
        }
}

int intel_gt_init_hw(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;
        struct intel_uncore *uncore = gt->uncore;
        int ret;

        gt->last_init_time = ktime_get();

        /* Double layer security blanket, see i915_gem_init() */
        intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

        if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9)
                intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf));

        if (IS_HASWELL(i915))
                intel_uncore_write(uncore,
                                   MI_PREDICATE_RESULT_2,
                                   IS_HSW_GT3(i915) ?
                                   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

        /* Apply the GT workarounds... */
        intel_gt_apply_workarounds(gt);
        /* ...and determine whether they are sticking. */
        intel_gt_verify_workarounds(gt, "init");

        intel_gt_init_swizzling(gt);

        /*
         * At least 830 can leave some of the unused rings
         * "active" (ie. head != tail) after resume which
         * will prevent c3 entry. Makes sure all unused rings
         * are totally idle.
         */
        init_unused_rings(gt);

        ret = i915_ppgtt_init_hw(gt);
        if (ret) {
                DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
                goto out;
        }

        /* We can't enable contexts until all firmware is loaded */
        ret = intel_uc_init_hw(&gt->uc);
        if (ret) {
                i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
                goto out;
        }

        intel_mocs_init(gt);

out:
        intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
        return ret;
}

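/* Thin wrappers around intel_uncore_rmw() for setting or clearing bits. */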
static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
        intel_uncore_rmw(uncore, reg, 0, set);
}

static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
        intel_uncore_rmw(uncore, reg, clr, 0);
}

static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
        intel_uncore_rmw(uncore, reg, 0, 0);
}

static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
{
        GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
        GEN6_RING_FAULT_REG_POSTING_READ(engine);
}

void
intel_gt_clear_error_registers(struct intel_gt *gt,
                               intel_engine_mask_t engine_mask)
{
        struct drm_i915_private *i915 = gt->i915;
        struct intel_uncore *uncore = gt->uncore;
        u32 eir;

        if (!IS_GEN(i915, 2))
                clear_register(uncore, PGTBL_ER);

        if (INTEL_GEN(i915) < 4)
                clear_register(uncore, IPEIR(RENDER_RING_BASE));
        else
                clear_register(uncore, IPEIR_I965);

        clear_register(uncore, EIR);
        eir = intel_uncore_read(uncore, EIR);
        if (eir) {
                /*
                 * some errors might have become stuck,
                 * mask them.
                 */
                DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
                rmw_set(uncore, EMR, eir);
                intel_uncore_write(uncore, GEN2_IIR,
                                   I915_MASTER_ERROR_INTERRUPT);
        }

        if (INTEL_GEN(i915) >= 12) {
                rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
                intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
        } else if (INTEL_GEN(i915) >= 8) {
                rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
                intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
        } else if (INTEL_GEN(i915) >= 6) {
                struct intel_engine_cs *engine;
                enum intel_engine_id id;

                for_each_engine_masked(engine, gt, engine_mask, id)
                        gen8_clear_engine_error_register(engine);
        }
}

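/*
 * Check each engine's fault register for a stale, still-valid fault and log
 * it; the registers themselves are cleared separately by
 * intel_gt_clear_error_registers().
 */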
static void gen6_check_faults(struct intel_gt *gt)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        u32 fault;

        for_each_engine(engine, gt, id) {
                fault = GEN6_RING_FAULT_REG_READ(engine);
                if (fault & RING_FAULT_VALID) {
                        drm_dbg(&engine->i915->drm, "Unexpected fault\n"
                                "\tAddr: 0x%08lx\n"
                                "\tAddress space: %s\n"
                                "\tSource ID: %d\n"
                                "\tType: %d\n",
                                fault & PAGE_MASK,
                                fault & RING_FAULT_GTTSEL_MASK ?
                                "GGTT" : "PPGTT",
                                RING_FAULT_SRCID(fault),
                                RING_FAULT_FAULT_TYPE(fault));
                }
        }
}

static void gen8_check_faults(struct intel_gt *gt)
{
        struct intel_uncore *uncore = gt->uncore;
        i915_reg_t fault_reg, fault_data0_reg, fault_data1_reg;
        u32 fault;

        if (INTEL_GEN(gt->i915) >= 12) {
                fault_reg = GEN12_RING_FAULT_REG;
                fault_data0_reg = GEN12_FAULT_TLB_DATA0;
                fault_data1_reg = GEN12_FAULT_TLB_DATA1;
        } else {
                fault_reg = GEN8_RING_FAULT_REG;
                fault_data0_reg = GEN8_FAULT_TLB_DATA0;
                fault_data1_reg = GEN8_FAULT_TLB_DATA1;
        }

        fault = intel_uncore_read(uncore, fault_reg);
        if (fault & RING_FAULT_VALID) {
                u32 fault_data0, fault_data1;
                u64 fault_addr;

                fault_data0 = intel_uncore_read(uncore, fault_data0_reg);
                fault_data1 = intel_uncore_read(uncore, fault_data1_reg);

                fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
                             ((u64)fault_data0 << 12);

                drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
                        "\tAddr: 0x%08x_%08x\n"
                        "\tAddress space: %s\n"
                        "\tEngine ID: %d\n"
                        "\tSource ID: %d\n"
                        "\tType: %d\n",
                        upper_32_bits(fault_addr), lower_32_bits(fault_addr),
                        fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
                        GEN8_RING_FAULT_ENGINE_ID(fault),
                        RING_FAULT_SRCID(fault),
                        RING_FAULT_FAULT_TYPE(fault));
        }
}

void intel_gt_check_and_clear_faults(struct intel_gt *gt)
{
        struct drm_i915_private *i915 = gt->i915;

        /* From GEN8 onwards we only have one 'All Engine Fault Register' */
        if (INTEL_GEN(i915) >= 8)
                gen8_check_faults(gt);
        else if (INTEL_GEN(i915) >= 6)
                gen6_check_faults(gt);
        else
                return;

        intel_gt_clear_error_registers(gt, ALL_ENGINES);
}

void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
{
        struct intel_uncore *uncore = gt->uncore;
        intel_wakeref_t wakeref;

        /*
         * No actual flushing is required for the GTT write domain for reads
         * from the GTT domain. Writes to it "immediately" go to main memory
         * as far as we know, so there's no chipset flush. It also doesn't
         * land in the GPU render cache.
         *
         * However, we do have to enforce the order so that all writes through
         * the GTT land before any writes to the device, such as updates to
         * the GATT itself.
         *
         * We also have to wait a bit for the writes to land from the GTT.
         * An uncached read (i.e. mmio) seems to be ideal for the round-trip
         * timing. This issue has only been observed when switching quickly
         * between GTT writes and CPU reads from inside the kernel on recent hw,
         * and it appears to only affect discrete GTT blocks (i.e. on LLC
         * system agents we cannot reproduce this behaviour, until Cannonlake
         * became discrete).
         */

        wmb();

        if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
                return;

        intel_gt_chipset_flush(gt);

        with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref) {
                unsigned long flags;

                spin_lock_irqsave(&uncore->lock, flags);
                intel_uncore_posting_read_fw(uncore,
                                             RING_HEAD(RENDER_RING_BASE));
                spin_unlock_irqrestore(&uncore->lock, flags);
        }
}

void intel_gt_chipset_flush(struct intel_gt *gt)
{
        wmb();
        if (INTEL_GEN(gt->i915) < 6)
                intel_gtt_chipset_flush();
}

void intel_gt_driver_register(struct intel_gt *gt)
{
        intel_rps_driver_register(&gt->rps);

        debugfs_gt_register(gt);
}

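/*
 * Allocate and pin the scratch page in the GGTT, preferring device local
 * memory, then stolen memory, and finally a plain internal object.
 */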
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
{
        struct drm_i915_private *i915 = gt->i915;
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        int ret;

        obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE);
        if (IS_ERR(obj))
                obj = i915_gem_object_create_stolen(i915, size);
        if (IS_ERR(obj))
                obj = i915_gem_object_create_internal(i915, size);
        if (IS_ERR(obj)) {
                drm_err(&i915->drm, "Failed to allocate scratch page\n");
                return PTR_ERR(obj);
        }

        vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err_unref;
        }

        ret = i915_ggtt_pin(vma, NULL, 0, PIN_HIGH);
        if (ret)
                goto err_unref;

        gt->scratch = i915_vma_make_unshrinkable(vma);
        return 0;

err_unref:
        i915_gem_object_put(obj);
        return ret;
}

static void intel_gt_fini_scratch(struct intel_gt *gt)
{
        i915_vma_unpin_and_release(&gt->scratch, 0);
}

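/*
 * Select the address space used for the kernel context: a full ppGTT where
 * the hardware supports one, otherwise a reference to the GGTT vm.
 */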
static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
        if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
                return &i915_ppgtt_create(gt)->vm;
        else
                return i915_vm_get(&gt->ggtt->vm);
}

static int __engines_record_defaults(struct intel_gt *gt)
{
        struct i915_request *requests[I915_NUM_ENGINES] = {};
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        /*
         * As we reset the gpu during very early sanitisation, the current
         * register state on the GPU should reflect its defaults values.
         * We load a context onto the hw (with restore-inhibit), then switch
         * over to a second context to save that default register state. We
         * can then prime every new context with that state so they all start
         * from the same default HW values.
         */

        for_each_engine(engine, gt, id) {
                struct intel_renderstate so;
                struct intel_context *ce;
                struct i915_request *rq;

                /* We must be able to switch to something! */
                GEM_BUG_ON(!engine->kernel_context);

                ce = intel_context_create(engine);
                if (IS_ERR(ce)) {
                        err = PTR_ERR(ce);
                        goto out;
                }

                err = intel_renderstate_init(&so, ce);
                if (err)
                        goto err;

                rq = i915_request_create(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
                        goto err_fini;
                }

                err = intel_engine_emit_ctx_wa(rq);
                if (err)
                        goto err_rq;

                err = intel_renderstate_emit(&so, rq);
                if (err)
                        goto err_rq;

err_rq:
                requests[id] = i915_request_get(rq);
                i915_request_add(rq);
err_fini:
                intel_renderstate_fini(&so, ce);
err:
                if (err) {
                        intel_context_put(ce);
                        goto out;
                }
        }

        /* Flush the default context image to memory, and enable powersaving. */
        if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
                err = -EIO;
                goto out;
        }

        for (id = 0; id < ARRAY_SIZE(requests); id++) {
                struct i915_request *rq;
                struct file *state;

                rq = requests[id];
                if (!rq)
                        continue;

                if (rq->fence.error) {
                        err = -EIO;
                        goto out;
                }

                GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
                if (!rq->context->state)
                        continue;

                /* Keep a copy of the state's backing pages; free the obj */
                state = shmem_create_from_object(rq->context->state->obj);
                if (IS_ERR(state)) {
                        err = PTR_ERR(state);
                        goto out;
                }
                rq->engine->default_state = state;
        }

out:
        /*
         * If we have to abandon now, we expect the engines to be idle
         * and ready to be torn-down. The quickest way we can accomplish
         * this is by declaring ourselves wedged.
         */
        if (err)
                intel_gt_set_wedged(gt);

        for (id = 0; id < ARRAY_SIZE(requests); id++) {
                struct intel_context *ce;
                struct i915_request *rq;

                rq = requests[id];
                if (!rq)
                        continue;

                ce = rq->context;
                i915_request_put(rq);
                intel_context_put(ce);
        }
        return err;
}

static int __engines_verify_workarounds(struct intel_gt *gt)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int err = 0;

        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                return 0;

        for_each_engine(engine, gt, id) {
                if (intel_engine_verify_workarounds(engine, "load"))
                        err = -EIO;
        }

        /* Flush and restore the kernel context for safety */
        if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
                err = -EIO;

        return err;
}

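/*
 * Quiesce the GT for teardown: wedge on fini so no new work is accepted,
 * then run the suspend sequence and insist the GT is no longer awake.
 */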
static void __intel_gt_disable(struct intel_gt *gt)
{
        intel_gt_set_wedged_on_fini(gt);

        intel_gt_suspend_prepare(gt);
        intel_gt_suspend_late(gt);

        GEM_BUG_ON(intel_gt_pm_is_awake(gt));
}

int intel_gt_init(struct intel_gt *gt)
{
        int err;

        err = i915_inject_probe_error(gt->i915, -ENODEV);
        if (err)
                return err;

        /*
         * This is just a security blanket to placate dragons.
         * On some systems, we very sporadically observe that the first TLBs
         * used by the CS may be stale, despite us poking the TLB reset. If
         * we hold the forcewake during initialisation these problems
         * just magically go away.
         */
        intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

        err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? SZ_256K : SZ_4K);
        if (err)
                goto out_fw;

        intel_gt_pm_init(gt);

        gt->vm = kernel_vm(gt);
        if (!gt->vm) {
                err = -ENOMEM;
                goto err_pm;
        }

        err = intel_engines_init(gt);
        if (err)
                goto err_engines;

        err = intel_uc_init(&gt->uc);
        if (err)
                goto err_engines;

        err = intel_gt_resume(gt);
        if (err)
                goto err_uc_init;

        err = __engines_record_defaults(gt);
        if (err)
                goto err_gt;

        err = __engines_verify_workarounds(gt);
        if (err)
                goto err_gt;

        err = i915_inject_probe_error(gt->i915, -EIO);
        if (err)
                goto err_gt;

        goto out_fw;
err_gt:
        __intel_gt_disable(gt);
        intel_uc_fini_hw(&gt->uc);
err_uc_init:
        intel_uc_fini(&gt->uc);
err_engines:
        intel_engines_release(gt);
        i915_vm_put(fetch_and_zero(&gt->vm));
err_pm:
        intel_gt_pm_fini(gt);
        intel_gt_fini_scratch(gt);
out_fw:
        if (err)
                intel_gt_set_wedged_on_init(gt);
        intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
        return err;
}

void intel_gt_driver_remove(struct intel_gt *gt)
{
        __intel_gt_disable(gt);

        intel_uc_driver_remove(&gt->uc);

        intel_engines_release(gt);
}

void intel_gt_driver_unregister(struct intel_gt *gt)
{
        intel_wakeref_t wakeref;

        intel_rps_driver_unregister(&gt->rps);

        /*
         * Upon unregistering the device to prevent any new users, cancel
         * all in-flight requests so that we can quickly unbind the active
         * resources.
         */
        intel_gt_set_wedged(gt);

        /* Scrub all HW state upon release */
        with_intel_runtime_pm(gt->uncore->rpm, wakeref)
                __intel_gt_reset(gt, ALL_ENGINES);
}

void intel_gt_driver_release(struct intel_gt *gt)
{
        struct i915_address_space *vm;

        vm = fetch_and_zero(&gt->vm);
        if (vm) /* FIXME being called twice on error paths :( */
                i915_vm_put(vm);

        intel_gt_pm_fini(gt);
        intel_gt_fini_scratch(gt);
        intel_gt_fini_buffer_pool(gt);
}

void intel_gt_driver_late_release(struct intel_gt *gt)
{
        /* We need to wait for inflight RCU frees to release their grip */
        rcu_barrier();

        intel_uc_driver_late_release(&gt->uc);
        intel_gt_fini_requests(gt);
        intel_gt_fini_reset(gt);
        intel_gt_fini_timelines(gt);
        intel_engines_free(gt);
}

void intel_gt_info_print(const struct intel_gt_info *info,
                         struct drm_printer *p)
{
        drm_printf(p, "available engines: %x\n", info->engine_mask);

        intel_sseu_dump(&info->sseu, p);
}