/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <drm/drm_print.h>

#include "gem/i915_gem_context.h"

#include "gt/intel_gt.h"

#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_engine_pool.h"
#include "intel_engine_user.h"
#include "intel_context.h"
#include "intel_lrc.h"
#include "intel_reset.h"
#include "intel_ring.h"
/* Haswell does have the CXT_SIZE register, however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds up to 17 pages.
 */
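/*
 * For reference, assuming 4 KiB pages: DIV_ROUND_UP(66944, 4096) == 17,
 * hence the 17 pages reserved below.
 */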
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)

#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)
#define MAX_MMIO_BASES 3

	/* mmio bases table *must* be sorted in reverse gen order */
	struct engine_mmio_base {
		u32 gen : 8;
		u32 base : 24;
	} mmio_bases[MAX_MMIO_BASES];
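/*
 * __engine_mmio_base() walks a mmio_bases[] table and returns the first
 * entry whose .gen is not newer than the running platform, which is why
 * the newest base must come first. For illustration: on a gen9 part the
 * VCS0 table below ({11, 6, 4}) resolves to GEN6_BSD_RING_BASE.
 */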
static const struct engine_info intel_engines[] = {
		.class = RENDER_CLASS,
			{ .gen = 1, .base = RENDER_RING_BASE }
		.class = COPY_ENGINE_CLASS,
			{ .gen = 6, .base = BLT_RING_BASE }
		.class = VIDEO_DECODE_CLASS,
			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
			{ .gen = 4, .base = BSD_RING_BASE }
		.class = VIDEO_DECODE_CLASS,
			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
		.class = VIDEO_DECODE_CLASS,
			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
		.class = VIDEO_DECODE_CLASS,
			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
		.class = VIDEO_ENHANCEMENT_CLASS,
			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .gen = 7, .base = VEBOX_RING_BASE }
		.class = VIDEO_ENHANCEMENT_CLASS,
			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
/**
 * intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
u32 intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(dev_priv)) {
		default:
			MISSING_CASE(INTEL_GEN(dev_priv));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 12:
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(dev_priv))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = I915_READ(GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = I915_READ(CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
			/*
			 * There is a discrepancy here between the size reported
			 * by the register and the size of the context layout
			 * in the docs. Both are described as authoritative!
			 *
			 * The discrepancy is on the order of a few cachelines,
			 * but the total is under one page (4k), which is our
			 * minimum allocation anyway so it should all come
			 * out in the wash.
			 */
			cxt_size = I915_READ(CXT_SIZE) + 1;
			DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n",
					 INTEL_GEN(dev_priv),
					 cxt_size * 64,
					 cxt_size - 1);
			return round_up(cxt_size * 64, PAGE_SIZE);
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		/* fall through */
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(dev_priv) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}
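/*
 * Worked example (assuming 4 KiB pages): a gen9 render context image is
 * GEN9_LR_CONTEXT_RENDER_SIZE = 22 * PAGE_SIZE = 88 KiB, while every other
 * engine class on gen8+ uses GEN8_LR_CONTEXT_OTHER_SIZE = 8 KiB.
 */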
static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (INTEL_GEN(i915) >= bases[i].gen)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}
static void __sprint_engine_name(struct intel_engine_cs *engine)
{
	/*
	 * Before we know what the uABI name for this engine will be,
	 * we still would like to keep track of this engine in the debug logs.
	 * We throw in a ' here as a reminder that this isn't its final name.
	 */
	GEM_WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s'%u",
			     intel_engine_class_repr(engine->class),
			     engine->instance) >= sizeof(engine->name));
}
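/*
 * For illustration: the first render engine prints as "rcs'0" in early
 * debug output; the final uABI name ("rcs0") is only assigned once the
 * engine is registered for userspace (see intel_engine_user.c).
 */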
void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Though they added more rings on g4x/ilk, they did not add
	 * per-engine HWSTAM until gen6.
	 */
	if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
		return;

	if (INTEL_GEN(engine->i915) >= 3)
		ENGINE_WRITE(engine, RING_HWSTAM, mask);
	else
		ENGINE_WRITE16(engine, RING_HWSTAM, mask);
}

static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
{
	/* Mask off all writes into the unknown HWSP */
	intel_engine_set_hwsp_writemask(engine, ~0u);
}
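/*
 * intel_engine_setup() below allocates the software state for one engine
 * and copies the static description from intel_engines[]; beyond masking
 * off writes to the stale HWSP, the hardware itself is not touched yet.
 */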
static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct intel_engine_cs *engine;

	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));

	if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;

	if (GEM_DEBUG_WARN_ON(gt->engine_class[info->class][info->instance]))
		return -EINVAL;

	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);

	engine->id = id;
	engine->legacy_idx = INVALID_ENGINE;
	engine->mask = BIT(id);
	engine->i915 = gt->i915;
	engine->gt = gt;
	engine->uncore = gt->uncore;
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases);

	engine->class = info->class;
	engine->instance = info->instance;
	__sprint_engine_name(engine);

	engine->props.heartbeat_interval_ms =
		CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
	engine->props.preempt_timeout_ms =
		CONFIG_DRM_I915_PREEMPT_TIMEOUT;
	engine->props.stop_timeout_ms =
		CONFIG_DRM_I915_STOP_TIMEOUT;
	engine->props.timeslice_duration_ms =
		CONFIG_DRM_I915_TIMESLICE_DURATION;

	/*
	 * To be overridden by the backend on setup. However to facilitate
	 * cleanup on error during setup, we always provide the destroy vfunc.
	 */
	engine->destroy = (typeof(engine->destroy))kfree;

	engine->context_size = intel_engine_context_size(gt->i915,
							 info->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;
	if (engine->context_size)
		DRIVER_CAPS(gt->i915)->has_logical_contexts = true;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	seqlock_init(&engine->stats.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	/* Scrub mmio state on takeover */
	intel_engine_sanitize_mmio(engine);

	gt->engine_class[info->class][info->instance] = engine;
	gt->engine[id] = engine;

	intel_engine_add_user(engine);
	gt->i915->engine[id] = engine;

	return 0;
}
static void __setup_engine_capabilities(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (engine->class == VIDEO_DECODE_CLASS) {
		/*
		 * HEVC support is present on first engine instance
		 * before Gen11 and on all instances afterwards.
		 */
		if (INTEL_GEN(i915) >= 11 ||
		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
			engine->uabi_capabilities |=
				I915_VIDEO_CLASS_CAPABILITY_HEVC;

		/*
		 * SFC block is present only on even logical engine
		 * instances.
		 */
		if ((INTEL_GEN(i915) >= 11 &&
		     RUNTIME_INFO(i915)->vdbox_sfc_access & engine->mask) ||
		    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
			engine->uabi_capabilities |=
				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
	} else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
		if (INTEL_GEN(i915) >= 9)
			engine->uabi_capabilities |=
				I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
	}
}

static void intel_setup_engine_capabilities(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		__setup_engine_capabilities(engine);
}
/**
 * intel_engines_cleanup() - free the resources allocated for Command Streamers
 * @gt: pointer to struct intel_gt
 */
void intel_engines_cleanup(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		engine->destroy(engine);
		gt->engine[id] = NULL;
		gt->i915->engine[id] = NULL;
	}
}

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @gt: pointer to struct intel_gt
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_device_info *device_info = mkwrite_device_info(i915);
	const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	WARN_ON(engine_mask == 0);
	WARN_ON(engine_mask &
		GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(i915, i))
			continue;

		err = intel_engine_setup(gt, i);
		if (err)
			goto cleanup;

		mask |= BIT(i);
	}

	/*
	 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
	 */
	if (WARN_ON(mask != engine_mask))
		device_info->engine_mask = mask;

	RUNTIME_INFO(i915)->num_engines = hweight32(mask);

	intel_gt_check_and_clear_faults(gt);

	intel_setup_engine_capabilities(gt);

	return 0;

cleanup:
	intel_engines_cleanup(gt);
	return err;
}
/**
 * intel_engines_init() - init the Engine Command Streamers
 * @gt: pointer to struct intel_gt
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct intel_gt *gt)
{
	int (*init)(struct intel_engine_cs *engine);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	if (HAS_EXECLISTS(gt->i915))
		init = intel_execlists_submission_init;
	else
		init = intel_ring_submission_init;

	for_each_engine(engine, gt, id) {
		err = init(engine);
		if (err)
			goto cleanup;
	}

	return 0;

cleanup:
	intel_engines_cleanup(gt);
	return err;
}
void intel_engine_init_execlists(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->port_mask = 1;
	GEM_BUG_ON(!is_power_of_2(execlists_num_ports(execlists)));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	memset(execlists->pending, 0, sizeof(execlists->pending));
	memset(execlists->inflight, 0, sizeof(execlists->inflight));
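	/*
	 * No requests are queued yet: the priority hint is parked at INT_MIN
	 * (so any real submission takes precedence) and the queue rbtree
	 * starts out empty.
	 */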
	execlists->queue_priority_hint = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;
}
static void cleanup_status_page(struct intel_engine_cs *engine)
{
	struct i915_vma *vma;

	/* Prevent writes into HWSP after returning the page to the system */
	intel_engine_set_hwsp_writemask(engine, ~0u);

	vma = fetch_and_zero(&engine->status_page.vma);
	if (!vma)
		return;

	if (!HWS_NEEDS_PHYSICAL(engine->i915))
		i915_vma_unpin(vma);

	i915_gem_object_unpin_map(vma->obj);
	i915_gem_object_put(vma->obj);
}
static int pin_ggtt_status_page(struct intel_engine_cs *engine,
				struct i915_vma *vma)
{
	unsigned int flags;

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
		/*
		 * On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;

	return i915_vma_pin(vma, 0, 0, flags);
}
static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void *vaddr;
	int ret;

	/*
	 * Though the HWS register does support 36bit addresses, historically
	 * we have had hangs and corruption reported due to wild writes if
	 * the HWS is placed above 4G. We only allow objects to be allocated
	 * in GFP_DMA32 for i965, and no earlier physical address users had
	 * access to more than 4G.
	 */
	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err;
	}

	engine->status_page.addr = memset(vaddr, 0, PAGE_SIZE);
	engine->status_page.vma = vma;

	if (!HWS_NEEDS_PHYSICAL(engine->i915)) {
		ret = pin_ggtt_status_page(engine, vma);
		if (ret)
			goto err_unpin;
	}

	return 0;

err_unpin:
	i915_gem_object_unpin_map(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}
static int intel_engine_setup_common(struct intel_engine_cs *engine)
{
	int err;

	init_llist_head(&engine->barrier_tasks);

	err = init_status_page(engine);
	if (err)
		return err;

	intel_engine_init_active(engine, ENGINE_PHYSICAL);
	intel_engine_init_breadcrumbs(engine);
	intel_engine_init_execlists(engine);
	intel_engine_init_cmd_parser(engine);
	intel_engine_init__pm(engine);

	intel_engine_pool_init(&engine->pool);

	/* Use the whole device by default */
	engine->sseu =
		intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);

	intel_engine_init_workarounds(engine);
	intel_engine_init_whitelist(engine);
	intel_engine_init_ctx_wa(engine);

	return 0;
}
/**
 * intel_engines_setup() - setup engine state not requiring hw access
 * @gt: pointer to struct intel_gt
 *
 * Initializes engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
int intel_engines_setup(struct intel_gt *gt)
{
	int (*setup)(struct intel_engine_cs *engine);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	if (HAS_EXECLISTS(gt->i915))
		setup = intel_execlists_submission_setup;
	else
		setup = intel_ring_submission_setup;

	for_each_engine(engine, gt, id) {
		err = intel_engine_setup_common(engine);
		if (err)
			goto cleanup;

		err = setup(engine);
		if (err)
			goto cleanup;

		/* We expect the backend to take control over its state */
		GEM_BUG_ON(engine->destroy == (typeof(engine->destroy))kfree);

		GEM_BUG_ON(!engine->cops);
	}

	return 0;

cleanup:
	intel_engines_cleanup(gt);
	return err;
}
struct measure_breadcrumb {
	struct i915_request rq;
	struct intel_timeline timeline;
	struct intel_ring ring;
	u32 cs[1024];
};

static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
{
	struct measure_breadcrumb *frame;
	int dw = -ENOMEM;

	GEM_BUG_ON(!engine->gt->scratch);

	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
	if (!frame)
		return -ENOMEM;

	if (intel_timeline_init(&frame->timeline,
				engine->gt,
				engine->status_page.vma))
		goto out_frame;

	mutex_lock(&frame->timeline.mutex);

	frame->ring.vaddr = frame->cs;
	frame->ring.size = sizeof(frame->cs);
	frame->ring.effective_size = frame->ring.size;
	intel_ring_update_space(&frame->ring);

	frame->rq.i915 = engine->i915;
	frame->rq.engine = engine;
	frame->rq.ring = &frame->ring;
	rcu_assign_pointer(frame->rq.timeline, &frame->timeline);

	dw = intel_timeline_pin(&frame->timeline);
	if (dw < 0)
		goto out_unlock;

	spin_lock_irq(&engine->active.lock);
	dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
	spin_unlock_irq(&engine->active.lock);

	GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */

	intel_timeline_unpin(&frame->timeline);

out_unlock:
	mutex_unlock(&frame->timeline.mutex);
	intel_timeline_fini(&frame->timeline);
out_frame:
	kfree(frame);
	return dw;
}
void
intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
{
	INIT_LIST_HEAD(&engine->active.requests);

	spin_lock_init(&engine->active.lock);
	lockdep_set_subclass(&engine->active.lock, subclass);

	/*
	 * Due to an interesting quirk in lockdep's internal debug tracking,
	 * after setting a subclass we must ensure the lock is used. Otherwise,
	 * nr_unused_locks is incremented once too often.
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	local_irq_disable();
	lock_map_acquire(&engine->active.lock.dep_map);
	lock_map_release(&engine->active.lock.dep_map);
	local_irq_enable();
#endif
}
static struct intel_context *
create_kernel_context(struct intel_engine_cs *engine)
{
	static struct lock_class_key kernel;
	struct intel_context *ce;
	int err;

	ce = intel_context_create(engine->i915->kernel_context, engine);
	if (IS_ERR(ce))
		return ce;

	ce->ring = __intel_context_ring_size(SZ_4K);

	err = intel_context_pin(ce);
	if (err) {
		intel_context_put(ce);
		return ERR_PTR(err);
	}

	/*
	 * Give our perma-pinned kernel timelines a separate lockdep class,
	 * so that we can use them from within the normal user timelines
	 * should we need to inject GPU operations during their request
	 * construction.
	 */
	lockdep_set_class(&ce->timeline->mutex, &kernel);

	return ce;
}
/**
 * intel_engine_init_common() - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	int ret;

	engine->set_default_submission(engine);

	/*
	 * We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ce = create_kernel_context(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	engine->kernel_context = ce;

	ret = measure_breadcrumb_dw(engine);
	if (ret < 0)
		goto err_unpin;

	engine->emit_fini_breadcrumb_dw = ret;

	return 0;

err_unpin:
	intel_context_unpin(ce);
	intel_context_put(ce);
	return ret;
}
/**
 * intel_engine_cleanup_common() - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!list_empty(&engine->active.requests));

	cleanup_status_page(engine);

	intel_engine_pool_fini(&engine->pool);
	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);

	if (engine->default_state)
		i915_gem_object_put(engine->default_state);

	if (engine->kernel_context) {
		intel_context_unpin(engine->kernel_context);
		intel_context_put(engine->kernel_context);
	}
	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));

	intel_wa_list_free(&engine->ctx_wa_list);
	intel_wa_list_free(&engine->wa_list);
	intel_wa_list_free(&engine->whitelist);
}
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	u64 acthd;

	if (INTEL_GEN(i915) >= 8)
		acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
	else if (INTEL_GEN(i915) >= 4)
		acthd = ENGINE_READ(engine, RING_ACTHD);
	else
		acthd = ENGINE_READ(engine, ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
	u64 bbaddr;

	if (INTEL_GEN(engine->i915) >= 8)
		bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
	else
		bbaddr = ENGINE_READ(engine, RING_BBADDR);

	return bbaddr;
}
static unsigned long stop_timeout(const struct intel_engine_cs *engine)
{
	if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */
		return 0;

	/*
	 * If we are doing a normal GPU reset, we can take our time and allow
	 * the engine to quiesce. We've stopped submission to the engine, and
	 * if we wait long enough an innocent context should complete and
	 * leave the engine idle. So they should not be caught unaware by
	 * the forthcoming GPU reset (which usually follows the stop_cs)!
	 */
	return READ_ONCE(engine->props.stop_timeout_ms);
}
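/*
 * intel_engine_stop_cs() below asks the command streamer to stop fetching
 * by setting STOP_RING in RING_MI_MODE and then polls for MODE_IDLE,
 * giving up after stop_timeout() if the engine will not quiesce.
 */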
int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const u32 base = engine->mmio_base;
	const i915_reg_t mode = RING_MI_MODE(base);
	int err;

	if (INTEL_GEN(engine->i915) < 3)
		return -ENODEV;

	GEM_TRACE("%s\n", engine->name);

	intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));

	err = 0;
	if (__intel_wait_for_register_fw(uncore,
					 mode, MODE_IDLE, MODE_IDLE,
					 1000, stop_timeout(engine),
					 NULL)) {
		GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
		err = -ETIMEDOUT;
	}

	/* A final mmio read to let GPU writes be hopefully flushed to memory */
	intel_uncore_posting_read_fw(uncore, mode);

	return err;
}

void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
	GEM_TRACE("%s\n", engine->name);

	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}
static u32
read_subslice_reg(struct intel_engine_cs *engine, int slice, int subslice,
		  i915_reg_t reg)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_uncore *uncore = engine->uncore;
	u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(i915) >= 11) {
		mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
		mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
	} else {
		mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
		mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
	}

	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(uncore,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&uncore->lock);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);

	mcr &= ~mcr_mask;
	mcr |= mcr_ss;
	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);

	val = intel_uncore_read_fw(uncore, reg);

	mcr &= ~mcr_mask;
	mcr |= old_mcr & mcr_mask;

	intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irq(&uncore->lock);

	return val;
}
/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *i915 = engine->i915;
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
	struct intel_uncore *uncore = engine->uncore;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(i915)) {
	default:
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));

		if (engine->id != RCS0)
			break;

		instdone->slice_common =
			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(engine, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(engine, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));

		if (engine->id != RCS0)
			break;

		instdone->slice_common =
			intel_uncore_read(uncore, GEN7_SC_INSTDONE);
		instdone->sampler[0][0] =
			intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] =
			intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone =
			intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
		if (engine->id == RCS0)
			/* HACK: Using the wrong struct member */
			instdone->slice_common =
				intel_uncore_read(uncore, GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
		break;
	}
}
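/*
 * ring_is_idle() below treats an engine as idle when the ringbuffer has
 * been drained (HEAD has caught up with TAIL) and, where the hardware
 * reports it, the command streamer itself signals MODE_IDLE.
 */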
static bool ring_is_idle(struct intel_engine_cs *engine)
{
	bool idle = true;

	if (I915_SELFTEST_ONLY(!engine->mmio_base))
		return true;

	if (!intel_engine_pm_get_if_awake(engine))
		return true;

	/* First check that no commands are left in the ring */
	if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
	    (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(engine->i915) > 2 &&
	    !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
		idle = false;

	intel_engine_pm_put(engine);

	return idle;
}
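/*
 * intel_engine_flush_submission() runs the execlists tasklet (if it is
 * scheduled and not disabled by a reset in progress) and then waits for
 * it to finish on any other CPU, so that pending CSB events and
 * submissions have been processed before the caller inspects the engine.
 */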
void intel_engine_flush_submission(struct intel_engine_cs *engine)
{
	struct tasklet_struct *t = &engine->execlists.tasklet;

	if (__tasklet_is_scheduled(t)) {
		local_bh_disable();
		if (tasklet_trylock(t)) {
			/* Must wait for any GPU reset in progress. */
			if (__tasklet_is_enabled(t))
				t->func(t->data);
			tasklet_unlock(t);
		}
		local_bh_enable();
	}

	/* Otherwise flush the tasklet if it was running on another cpu */
	tasklet_unlock_wait(t);
}
/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and that the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	/* More white lies, if wedged, hw state is inconsistent */
	if (intel_gt_is_wedged(engine->gt))
		return true;

	if (!intel_engine_pm_is_awake(engine))
		return true;

	/* Waiting to drain ELSP? */
	if (execlists_active(&engine->execlists)) {
		synchronize_hardirq(engine->i915->drm.pdev->irq);

		intel_engine_flush_submission(engine);

		if (execlists_active(&engine->execlists))
			return false;
	}

	/* ELSP is empty, but there are ready requests? E.g. after reset */
	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
		return false;

	return ring_is_idle(engine);
}
bool intel_engines_are_idle(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (intel_gt_is_wedged(gt))
		return true;

	/* Already parked (and passed an idleness test); must still be idle */
	if (!READ_ONCE(gt->awake))
		return true;

	for_each_engine(engine, gt, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

void intel_engines_reset_default_submission(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id)
		engine->set_default_submission(engine);
}
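/*
 * intel_engine_can_store_dword() reports whether it is safe for callers
 * to emit a store-dword style write to a (virtual) address on this
 * engine; the oldest platforms either only understand physical addresses
 * or are simply unreliable here.
 */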
bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 4:
		return !IS_I965G(engine->i915); /* who knows! */
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}
static int print_sched_attr(struct drm_i915_private *i915,
			    const struct i915_sched_attr *attr,
			    char *buf, int x, int len)
{
	if (attr->priority == I915_PRIORITY_INVALID)
		return x;

	x += snprintf(buf + x, len - x,
		      " prio=%d", attr->priority);

	return x;
}

static void print_request(struct drm_printer *m,
			  struct i915_request *rq,
			  const char *prefix)
{
	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
	char buf[80] = "";
	int x = 0;

	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));

	drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
		   prefix,
		   rq->fence.context, rq->fence.seqno,
		   i915_request_completed(rq) ? "!" :
		   i915_request_started(rq) ? "*" :
		   "",
		   test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
			    &rq->fence.flags) ? "+" :
		   test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
			    &rq->fence.flags) ? "-" :
		   "",
		   buf,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   name);
}
static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				drm_printf(m, "*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "[%04zx] %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
}
static struct intel_timeline *get_timeline(struct i915_request *rq)
{
	struct intel_timeline *tl;

	/*
	 * Even though we are holding the engine->active.lock here, there
	 * is no control over the submission queue per-se and we are
	 * inspecting the active state at a random point in time, with an
	 * unknown queue. Play safe and make sure the timeline remains valid.
	 * (Only being used for pretty printing, one extra kref shouldn't
	 * cause a camel stampede!)
	 */
	rcu_read_lock();
	tl = rcu_dereference(rq->timeline);
	if (!kref_get_unless_zero(&tl->kref))
		tl = NULL;
	rcu_read_unlock();

	return tl;
}

static const char *repr_timer(const struct timer_list *t)
{
	if (!READ_ONCE(t->expires))
		return "idle";

	if (timer_pending(t))
		return "active";

	return "expired";
}
static void intel_engine_print_registers(struct intel_engine_cs *engine,
					 struct drm_printer *m)
{
	struct drm_i915_private *dev_priv = engine->i915;
	struct intel_engine_execlists * const execlists = &engine->execlists;
	u64 addr;

	if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
		drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
	drm_printf(m, "\tRING_START: 0x%08x\n",
		   ENGINE_READ(engine, RING_START));
	drm_printf(m, "\tRING_HEAD: 0x%08x\n",
		   ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
	drm_printf(m, "\tRING_TAIL: 0x%08x\n",
		   ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
	drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
		   ENGINE_READ(engine, RING_CTL),
		   ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (INTEL_GEN(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
			   ENGINE_READ(engine, RING_MI_MODE),
			   ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		drm_printf(m, "\tRING_IMR: %08x\n",
			   ENGINE_READ(engine, RING_IMR));
	}

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 8)
		addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
	else if (INTEL_GEN(dev_priv) >= 4)
		addr = ENGINE_READ(engine, RING_DMA_FADD);
	else
		addr = ENGINE_READ(engine, DMA_FADD_I8XX);
	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 4) {
		drm_printf(m, "\tIPEIR: 0x%08x\n",
			   ENGINE_READ(engine, RING_IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n",
			   ENGINE_READ(engine, RING_IPEHR));
	} else {
		drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
	}

	if (HAS_EXECLISTS(dev_priv)) {
		struct i915_request * const *port, *rq;
		const u32 *hws =
			&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
		const u8 num_entries = execlists->csb_size;
		unsigned int idx;
		u8 read, write;

		drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
			   yesno(test_bit(TASKLET_STATE_SCHED,
					  &engine->execlists.tasklet.state)),
			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)),
			   repr_timer(&engine->execlists.preempt),
			   repr_timer(&engine->execlists.timer));

		read = execlists->csb_head;
		write = READ_ONCE(*execlists->csb_write);

		drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n",
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
			   ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
			   read, write, num_entries);

		if (read >= num_entries)
			read = 0;
		if (write >= num_entries)
			write = 0;
		if (read > write)
			write += num_entries;
		while (read < write) {
			idx = ++read % num_entries;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
				   idx, hws[idx * 2], hws[idx * 2 + 1]);
		}

		execlists_active_lock_bh(execlists);
		for (port = execlists->active; (rq = *port); port++) {
			char hdr[80];
			int len;

			len = snprintf(hdr, sizeof(hdr),
				       "\t\tActive[%d]: ",
				       (int)(port - execlists->active));
			if (!i915_request_signaled(rq)) {
				struct intel_timeline *tl = get_timeline(rq);

				len += snprintf(hdr + len, sizeof(hdr) - len,
						"ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
						i915_ggtt_offset(rq->ring->vma),
						tl ? tl->hwsp_offset : 0,
						hwsp_seqno(rq));

				if (tl)
					intel_timeline_put(tl);
			}
			snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
			print_request(m, rq, hdr);
		}
		for (port = execlists->pending; (rq = *port); port++) {
			struct intel_timeline *tl = get_timeline(rq);
			char hdr[80];

			snprintf(hdr, sizeof(hdr),
				 "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
				 (int)(port - execlists->pending),
				 i915_ggtt_offset(rq->ring->vma),
				 tl ? tl->hwsp_offset : 0,
				 hwsp_seqno(rq));
			print_request(m, rq, hdr);
			if (tl)
				intel_timeline_put(tl);
		}
		execlists_active_unlock_bh(execlists);
	} else if (INTEL_GEN(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   ENGINE_READ(engine, RING_PP_DIR_BASE));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   ENGINE_READ(engine, RING_PP_DIR_DCLV));
	}
}
static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{
	void *ring;
	size_t size;

	drm_printf(m,
		   "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
		   rq->head, rq->postfix, rq->tail,
		   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
		   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);

	size = rq->tail - rq->head;
	if (rq->tail < rq->head)
		size += rq->ring->size;

	ring = kmalloc(size, GFP_ATOMIC);
	if (ring) {
		const void *vaddr = rq->ring->vaddr;
		unsigned int head = rq->head;
		unsigned int len = 0;

		if (rq->tail < head) {
			len = rq->ring->size - head;
			memcpy(ring, vaddr + head, len);
			head = 0;
		}
		memcpy(ring + len, vaddr + head, size - len);

		hexdump(m, ring, size);

		kfree(ring);
	}
}
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct i915_request *rq;
	intel_wakeref_t wakeref;
	unsigned long flags;

	if (header) {
		va_list ap;

		va_start(ap, header);
		drm_vprintf(m, header, &ap);
		va_end(ap);
	}

	if (intel_gt_is_wedged(engine->gt))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));

	rq = READ_ONCE(engine->heartbeat.systole);
	if (rq)
		drm_printf(m, "\tHeartbeat: %d ms ago\n",
			   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));

	drm_printf(m, "\tRequests:\n");

	spin_lock_irqsave(&engine->active.lock, flags);
	rq = intel_engine_find_active_request(engine);
	if (rq) {
		struct intel_timeline *tl = get_timeline(rq);

		print_request(m, rq, "\t\tactive ");

		drm_printf(m, "\t\tring->start: 0x%08x\n",
			   i915_ggtt_offset(rq->ring->vma));
		drm_printf(m, "\t\tring->head: 0x%08x\n",
			   rq->ring->head);
		drm_printf(m, "\t\tring->tail: 0x%08x\n",
			   rq->ring->tail);
		drm_printf(m, "\t\tring->emit: 0x%08x\n",
			   rq->ring->emit);
		drm_printf(m, "\t\tring->space: 0x%08x\n",
			   rq->ring->space);

		if (tl) {
			drm_printf(m, "\t\tring->hwsp: 0x%08x\n",
				   tl->hwsp_offset);
			intel_timeline_put(tl);
		}

		print_request_ring(m, rq);

		if (rq->hw_context->lrc_reg_state) {
			drm_printf(m, "Logical Ring Context:\n");
			hexdump(m, rq->hw_context->lrc_reg_state, PAGE_SIZE);
		}
	}
	spin_unlock_irqrestore(&engine->active.lock, flags);

	drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
	wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
	if (wakeref) {
		intel_engine_print_registers(engine, m);
		intel_runtime_pm_put(engine->uncore->rpm, wakeref);
	} else {
		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
	}

	intel_execlists_show_requests(engine, m, print_request, 8);

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));

	intel_engine_print_breadcrumbs(engine, m);
}
/**
 * intel_enable_engine_stats() - Enable engine busy tracking on engine
 * @engine: engine to enable stats collection
 *
 * Start collecting the engine busyness data for @engine.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned long flags;
	int err = 0;

	if (!intel_engine_supports_stats(engine))
		return -ENODEV;

	execlists_active_lock_bh(execlists);
	write_seqlock_irqsave(&engine->stats.lock, flags);

	if (unlikely(engine->stats.enabled == ~0)) {
		err = -EBUSY;
		goto unlock;
	}

	if (engine->stats.enabled++ == 0) {
		struct i915_request * const *port;
		struct i915_request *rq;

		engine->stats.enabled_at = ktime_get();

		/* XXX submission method oblivious? */
		for (port = execlists->active; (rq = *port); port++)
			engine->stats.active++;

		for (port = execlists->pending; (rq = *port); port++) {
			/* Exclude any contexts already counted in active */
			if (!intel_context_inflight_count(rq->hw_context))
				engine->stats.active++;
		}

		if (engine->stats.active)
			engine->stats.start = engine->stats.enabled_at;
	}

unlock:
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
	execlists_active_unlock_bh(execlists);
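/*
 * Busyness accounting: stats.total accumulates completed busy periods,
 * and while at least one request is executing (stats.active) the time
 * elapsed since stats.start is added on top when the value is sampled
 * below.
 */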
static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total = engine->stats.total;

	/*
	 * If the engine is executing something at the moment
	 * add it to the total.
	 */
	if (engine->stats.active)
		total = ktime_add(total,
				  ktime_sub(ktime_get(), engine->stats.start));

	return total;
}

/**
 * intel_engine_get_busy_time() - Return current accumulated engine busyness
 * @engine: engine to report on
 *
 * Returns accumulated time @engine was busy since engine stats were enabled.
 */
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqbegin(&engine->stats.lock);
		total = __intel_engine_get_busy_time(engine);
	} while (read_seqretry(&engine->stats.lock, seq));

	return total;
}

/**
 * intel_disable_engine_stats() - Disable engine busy tracking on engine
 * @engine: engine to disable stats collection
 *
 * Stops collecting the engine busyness data for @engine.
 */
void intel_disable_engine_stats(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (!intel_engine_supports_stats(engine))
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);
	WARN_ON_ONCE(engine->stats.enabled == 0);
	if (--engine->stats.enabled == 0) {
		engine->stats.total = __intel_engine_get_busy_time(engine);
		engine->stats.active = 0;
	}
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
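/*
 * match_ring() checks whether the ring currently programmed into
 * RING_START is the ring backing this request, which helps narrow the
 * search for the request the hardware was actually executing.
 */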
static bool match_ring(struct i915_request *rq)
{
	u32 ring = ENGINE_READ(rq->engine, RING_START);

	return ring == i915_ggtt_offset(rq->ring->vma);
}

struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine)
{
	struct i915_request *request, *active = NULL;

	/*
	 * We are called by the error capture, reset and to dump engine
	 * state at random points in time. In particular, note that neither is
	 * crucially ordered with an interrupt. After a hang, the GPU is dead
	 * and we assume that no more writes can happen (we waited long enough
	 * for all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 * At all other times, we must assume the GPU is still running, but
	 * we only care about the snapshot of this moment.
	 */
	lockdep_assert_held(&engine->active.lock);
	list_for_each_entry(request, &engine->active.requests, sched.link) {
		if (i915_request_completed(request))
			continue;

		if (!i915_request_started(request))
			continue;

		/* More than one preemptible request may match! */
		if (!match_ring(request))
			continue;

		active = request;
		break;
	}

	return active;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "mock_engine.c"
#include "selftest_engine.c"
#include "selftest_engine_cs.c"
#endif