/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"

/* Haswell does have the CXT_SIZE register however it does not appear to be
 * valid. Now, docs explain in dwords what is in the context object. The full
 * size is 70720 bytes, however, the power context and execlist context will
 * never be saved (power context is stored elsewhere, and execlists don't work
 * on HSW) - so the final size, including the extra state required for the
 * Resource Streamer, is 66944 bytes, which rounds to 17 pages.
 */
#define HSW_CXT_TOTAL_SIZE		(17 * PAGE_SIZE)
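
/*
 * A quick check of the arithmetic above: 66944 bytes span
 * 66944 / 4096 = 16.34 pages, which rounds up to the 17 whole pages
 * encoded in HSW_CXT_TOTAL_SIZE.
 */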

#define DEFAULT_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN8_LR_CONTEXT_RENDER_SIZE	(20 * PAGE_SIZE)
#define GEN9_LR_CONTEXT_RENDER_SIZE	(22 * PAGE_SIZE)
#define GEN10_LR_CONTEXT_RENDER_SIZE	(18 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE	(14 * PAGE_SIZE)

#define GEN8_LR_CONTEXT_OTHER_SIZE	( 2 * PAGE_SIZE)

struct engine_class_info {
	const char *name;
	int (*init_legacy)(struct intel_engine_cs *engine);
	int (*init_execlists)(struct intel_engine_cs *engine);

	u8 uabi_class;
};

static const struct engine_class_info intel_engine_classes[] = {
	[RENDER_CLASS] = {
		.name = "rcs",
		.init_execlists = logical_render_ring_init,
		.init_legacy = intel_init_render_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_RENDER,
	},
	[COPY_ENGINE_CLASS] = {
		.name = "bcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_blt_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_COPY,
	},
	[VIDEO_DECODE_CLASS] = {
		.name = "vcs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_bsd_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO,
	},
	[VIDEO_ENHANCEMENT_CLASS] = {
		.name = "vecs",
		.init_execlists = logical_xcs_ring_init,
		.init_legacy = intel_init_vebox_ring_buffer,
		.uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
	},
};

#define MAX_MMIO_BASES 3
struct engine_info {
	unsigned int hw_id;
	unsigned int uabi_id;
	u8 class;
	u8 instance;
	/* mmio bases table *must* be sorted in reverse gen order */
	struct engine_mmio_base {
		u32 gen : 8;
		u32 base : 24;
	} mmio_bases[MAX_MMIO_BASES];
};

static const struct engine_info intel_engines[] = {
	[RCS] = {
		.hw_id = RCS_HW,
		.uabi_id = I915_EXEC_RENDER,
		.class = RENDER_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 1, .base = RENDER_RING_BASE }
		},
	},
	[BCS] = {
		.hw_id = BCS_HW,
		.uabi_id = I915_EXEC_BLT,
		.class = COPY_ENGINE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 6, .base = BLT_RING_BASE }
		},
	},
	[VCS] = {
		.hw_id = VCS_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD_RING_BASE },
			{ .gen = 6, .base = GEN6_BSD_RING_BASE },
			{ .gen = 4, .base = BSD_RING_BASE }
		},
	},
	[VCS2] = {
		.hw_id = VCS2_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD2_RING_BASE },
			{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
		},
	},
	[VCS3] = {
		.hw_id = VCS3_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 2,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
		},
	},
	[VCS4] = {
		.hw_id = VCS4_HW,
		.uabi_id = I915_EXEC_BSD,
		.class = VIDEO_DECODE_CLASS,
		.instance = 3,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
		},
	},
	[VECS] = {
		.hw_id = VECS_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 0,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX_RING_BASE },
			{ .gen = 7, .base = VEBOX_RING_BASE }
		},
	},
	[VECS2] = {
		.hw_id = VECS2_HW,
		.uabi_id = I915_EXEC_VEBOX,
		.class = VIDEO_ENHANCEMENT_CLASS,
		.instance = 1,
		.mmio_bases = {
			{ .gen = 11, .base = GEN11_VEBOX2_RING_BASE }
		},
	},
};

/**
 * __intel_engine_context_size() - return the size of the context for an engine
 * @dev_priv: i915 device private
 * @class: engine class
 *
 * Each engine class may require a different amount of space for a context
 * image.
 *
 * Return: size (in bytes) of an engine class specific context image
 *
 * Note: this size includes the HWSP, which is part of the context image
 * in LRC mode, but does not include the "shared data page" used with
 * GuC submission. The caller should account for this if using the GuC.
 */
static u32
__intel_engine_context_size(struct drm_i915_private *dev_priv, u8 class)
{
	u32 cxt_size;

	BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);

	switch (class) {
	case RENDER_CLASS:
		switch (INTEL_GEN(dev_priv)) {
		default:
			MISSING_CASE(INTEL_GEN(dev_priv));
			return DEFAULT_LR_CONTEXT_RENDER_SIZE;
		case 11:
			return GEN11_LR_CONTEXT_RENDER_SIZE;
		case 10:
			return GEN10_LR_CONTEXT_RENDER_SIZE;
		case 9:
			return GEN9_LR_CONTEXT_RENDER_SIZE;
		case 8:
			return GEN8_LR_CONTEXT_RENDER_SIZE;
		case 7:
			if (IS_HASWELL(dev_priv))
				return HSW_CXT_TOTAL_SIZE;

			cxt_size = I915_READ(GEN7_CXT_SIZE);
			return round_up(GEN7_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 6:
			cxt_size = I915_READ(CXT_SIZE);
			return round_up(GEN6_CXT_TOTAL_SIZE(cxt_size) * 64,
					PAGE_SIZE);
		case 5:
		case 4:
		case 3:
		case 2:
		/* For the special day when i810 gets merged. */
		case 1:
			return 0;
		}
		break;
	default:
		MISSING_CASE(class);
		/* fall through */
	case VIDEO_DECODE_CLASS:
	case VIDEO_ENHANCEMENT_CLASS:
	case COPY_ENGINE_CLASS:
		if (INTEL_GEN(dev_priv) < 8)
			return 0;
		return GEN8_LR_CONTEXT_OTHER_SIZE;
	}
}

static u32 __engine_mmio_base(struct drm_i915_private *i915,
			      const struct engine_mmio_base *bases)
{
	int i;

	for (i = 0; i < MAX_MMIO_BASES; i++)
		if (INTEL_GEN(i915) >= bases[i].gen)
			break;

	GEM_BUG_ON(i == MAX_MMIO_BASES);
	GEM_BUG_ON(!bases[i].base);

	return bases[i].base;
}

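/*
 * Illustrative sketch of the table walk above, assuming a gen9 device
 * looking up the first VCS engine: the gen 11 entry is skipped (9 < 11),
 * the gen 6 entry matches (9 >= 6), and the walk stops:
 *
 *	base = __engine_mmio_base(i915, intel_engines[VCS].mmio_bases);
 *	// base == GEN6_BSD_RING_BASE
 *
 * This is why each mmio_bases table must be sorted in reverse gen order.
 */
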
static void __sprint_engine_name(char *name, const struct engine_info *info)
{
	WARN_ON(snprintf(name, INTEL_ENGINE_CS_MAX_NAME, "%s%u",
			 intel_engine_classes[info->class].name,
			 info->instance) >= INTEL_ENGINE_CS_MAX_NAME);
}

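/*
 * For example (using the class names from intel_engine_classes above),
 * the render engine at instance 0 is named "rcs0", and the second video
 * decode engine (class "vcs", instance 1) is named "vcs1".
 */
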
static int
intel_engine_setup(struct drm_i915_private *dev_priv,
		   enum intel_engine_id id)
{
	const struct engine_info *info = &intel_engines[id];
	struct intel_engine_cs *engine;

	GEM_BUG_ON(info->class >= ARRAY_SIZE(intel_engine_classes));

	BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
	BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));

	if (GEM_WARN_ON(info->class > MAX_ENGINE_CLASS))
		return -EINVAL;

	if (GEM_WARN_ON(info->instance > MAX_ENGINE_INSTANCE))
		return -EINVAL;

	if (GEM_WARN_ON(dev_priv->engine_class[info->class][info->instance]))
		return -EINVAL;

	GEM_BUG_ON(dev_priv->engine[id]);
	engine = kzalloc(sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	engine->id = id;
	engine->i915 = dev_priv;
	__sprint_engine_name(engine->name, info);
	engine->hw_id = engine->guc_id = info->hw_id;
	engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
	engine->class = info->class;
	engine->instance = info->instance;

	engine->uabi_id = info->uabi_id;
	engine->uabi_class = intel_engine_classes[info->class].uabi_class;

	engine->context_size = __intel_engine_context_size(dev_priv,
							   engine->class);
	if (WARN_ON(engine->context_size > BIT(20)))
		engine->context_size = 0;
	if (engine->context_size)
		DRIVER_CAPS(dev_priv)->has_logical_contexts = true;

	/* Nothing to do here, execute in order of dependencies */
	engine->schedule = NULL;

	seqlock_init(&engine->stats.lock);

	ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);

	dev_priv->engine_class[info->class][info->instance] = engine;
	dev_priv->engine[id] = engine;
	return 0;
}

/**
 * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
	const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int mask = 0;
	unsigned int i;
	int err;

	WARN_ON(ring_mask == 0);
	WARN_ON(ring_mask &
		GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));

	for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
		if (!HAS_ENGINE(dev_priv, i))
			continue;

		err = intel_engine_setup(dev_priv, i);
		if (err)
			goto cleanup;

		mask |= ENGINE_MASK(i);
	}

	/*
	 * Catch failures to update intel_engines table when the new engines
	 * are added to the driver by a warning and disabling the forgotten
	 * engines.
	 */
	if (WARN_ON(mask != ring_mask))
		device_info->ring_mask = mask;

	/* We always presume we have at least RCS available for later probing */
	if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
		err = -ENODEV;
		goto cleanup;
	}

	device_info->num_rings = hweight32(mask);

	i915_check_and_clear_faults(dev_priv);

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id)
		kfree(engine);
	return err;
}

/**
 * intel_engines_init() - init the Engine Command Streamers
 * @dev_priv: i915 device private
 *
 * Return: non-zero if the initialization failed.
 */
int intel_engines_init(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id, err_id;
	int err;

	for_each_engine(engine, dev_priv, id) {
		const struct engine_class_info *class_info =
			&intel_engine_classes[engine->class];
		int (*init)(struct intel_engine_cs *engine);

		if (HAS_EXECLISTS(dev_priv))
			init = class_info->init_execlists;
		else
			init = class_info->init_legacy;

		err = -EINVAL;
		err_id = id;

		if (GEM_WARN_ON(!init))
			goto cleanup;

		err = init(engine);
		if (err)
			goto cleanup;

		GEM_BUG_ON(!engine->submit_request);
	}

	return 0;

cleanup:
	for_each_engine(engine, dev_priv, id) {
		if (id >= err_id) {
			kfree(engine);
			dev_priv->engine[id] = NULL;
		} else {
			dev_priv->gt.cleanup_engine(engine);
		}
	}
	return err;
}

void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* Our semaphore implementation is strictly monotonic (i.e. we proceed
	 * so long as the semaphore value in the register/page is greater
	 * than the sync value), so whenever we reset the seqno we must also
	 * reset the tracking semaphore value to 0, so that it is always
	 * before the next request's seqno. If we don't reset the semaphore
	 * value, then when the seqno moves backwards all future waits will
	 * complete instantly (causing rendering corruption).
	 */
	if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
		I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
		I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
		if (HAS_VEBOX(dev_priv))
			I915_WRITE(RING_SYNC_2(engine->mmio_base), 0);
	}

	intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
	clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);

	/* After manually advancing the seqno, fake the interrupt in case
	 * there are any waiters for that seqno.
	 */
	intel_engine_wakeup(engine);

	GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
}

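/*
 * To illustrate the hazard described above with made-up numbers: if the
 * tracking semaphore still held 100 when the seqno was rewound to 1, a
 * subsequent wait for seqno 50 would complete immediately (100 > 50) even
 * though request 50 had not yet executed; hence the writes of 0 to the
 * RING_SYNC_* registers above.
 */
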
static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
{
	i915_gem_batch_pool_init(&engine->batch_pool, engine);
}

static void intel_engine_init_execlist(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists * const execlists = &engine->execlists;

	execlists->port_mask = 1;
	BUILD_BUG_ON_NOT_POWER_OF_2(execlists_num_ports(execlists));
	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

	execlists->queue_priority = INT_MIN;
	execlists->queue = RB_ROOT_CACHED;
}

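/*
 * Note on the sizing above: execlists_num_ports() reports port_mask + 1,
 * so port_mask = 1 configures the default two ELSP ports (and must remain
 * a power of two minus one, as the BUILD_BUG_ON above enforces).
 */
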
/**
 * intel_engine_setup_common - setup engine state not requiring hw access
 * @engine: Engine to setup.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do not require hardware access.
 *
 * Typically done early in the submission mode specific engine setup stage.
 */
void intel_engine_setup_common(struct intel_engine_cs *engine)
{
	i915_timeline_init(engine->i915, &engine->timeline, engine->name);
	lockdep_set_subclass(&engine->timeline.lock, TIMELINE_ENGINE);

	intel_engine_init_execlist(engine);
	intel_engine_init_hangcheck(engine);
	intel_engine_init_batch_pool(engine);
	intel_engine_init_cmd_parser(engine);
}

int intel_engine_create_scratch(struct intel_engine_cs *engine,
				unsigned int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	WARN_ON(engine->scratch);

	obj = i915_gem_object_create_stolen(engine->i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	engine->scratch = vma;
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
{
	i915_vma_unpin_and_release(&engine->scratch, 0);
}

static void cleanup_status_page(struct intel_engine_cs *engine)
{
	if (HWS_NEEDS_PHYSICAL(engine->i915)) {
		void *addr = fetch_and_zero(&engine->status_page.page_addr);

		__free_page(virt_to_page(addr));
	}

	i915_vma_unpin_and_release(&engine->status_page.vma,
				   I915_VMA_RELEASE_MAP);
}

static int init_status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	unsigned int flags;
	void *vaddr;
	int ret;

	obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate status page\n");
		return PTR_ERR(obj);
	}

	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	flags = PIN_GLOBAL;
	if (!HAS_LLC(engine->i915))
		/* On g33, we cannot place HWS above 256MiB, so
		 * restrict its pinning to the low mappable arena.
		 * Though this restriction is not documented for
		 * gen4, gen5, or byt, they also behave similarly
		 * and hang if the HWS is placed at the top of the
		 * GTT. To generalise, it appears that all !llc
		 * platforms have issues with us placing the HWS
		 * above the mappable region (even though we never
		 * actually map it).
		 */
		flags |= PIN_MAPPABLE;
	else
		flags |= PIN_HIGH;
	ret = i915_vma_pin(vma, 0, 0, flags);
	if (ret)
		goto err;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto err_unpin;
	}

	engine->status_page.vma = vma;
	engine->status_page.ggtt_offset = i915_ggtt_offset(vma);
	engine->status_page.page_addr = memset(vaddr, 0, PAGE_SIZE);

	return 0;

err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return ret;
}

static int init_phys_status_page(struct intel_engine_cs *engine)
{
	struct page *page;

	/*
	 * Though the HWS register does support 36bit addresses, historically
	 * we have had hangs and corruption reported due to wild writes if
	 * the HWS is placed above 4G.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	engine->status_page.page_addr = page_address(page);

	return 0;
}

static void __intel_context_unpin(struct i915_gem_context *ctx,
				  struct intel_engine_cs *engine)
{
	intel_context_unpin(to_intel_context(ctx, engine));
}

/**
 * intel_engine_init_common - initialize engine state which might require hw access
 * @engine: Engine to initialize.
 *
 * Initializes @engine structure members shared between legacy and execlists
 * submission modes which do require hardware access.
 *
 * Typically done at later stages of submission mode specific engine setup.
 *
 * Returns zero on success or an error code on failure.
 */
int intel_engine_init_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct intel_context *ce;
	int ret;

	engine->set_default_submission(engine);

	/* We may need to do things with the shrinker which
	 * require us to immediately switch back to the default
	 * context. This can cause a problem as pinning the
	 * default context also requires GTT space which may not
	 * be available. To avoid this we always pin the default
	 * context.
	 */
	ce = intel_context_pin(i915->kernel_context, engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/*
	 * Similarly the preempt context must always be available so that
	 * we can interrupt the engine at any time.
	 */
	if (i915->preempt_context) {
		ce = intel_context_pin(i915->preempt_context, engine);
		if (IS_ERR(ce)) {
			ret = PTR_ERR(ce);
			goto err_unpin_kernel;
		}
	}

	ret = intel_engine_init_breadcrumbs(engine);
	if (ret)
		goto err_unpin_preempt;

	if (HWS_NEEDS_PHYSICAL(i915))
		ret = init_phys_status_page(engine);
	else
		ret = init_status_page(engine);
	if (ret)
		goto err_breadcrumbs;

	return 0;

err_breadcrumbs:
	intel_engine_fini_breadcrumbs(engine);
err_unpin_preempt:
	if (i915->preempt_context)
		__intel_context_unpin(i915->preempt_context, engine);
err_unpin_kernel:
	__intel_context_unpin(i915->kernel_context, engine);
	return ret;
}

/**
 * intel_engine_cleanup_common - cleans up the engine state created by
 * the common initializers.
 * @engine: Engine to cleanup.
 *
 * This cleans up everything created by the common helpers.
 */
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	intel_engine_cleanup_scratch(engine);

	cleanup_status_page(engine);

	intel_engine_fini_breadcrumbs(engine);
	intel_engine_cleanup_cmd_parser(engine);
	i915_gem_batch_pool_fini(&engine->batch_pool);

	if (engine->default_state)
		i915_gem_object_put(engine->default_state);

	if (i915->preempt_context)
		__intel_context_unpin(i915->preempt_context, engine);
	__intel_context_unpin(i915->kernel_context, engine);

	i915_timeline_fini(&engine->timeline);
}

u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 acthd;

	if (INTEL_GEN(dev_priv) >= 8)
		acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
					 RING_ACTHD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		acthd = I915_READ(RING_ACTHD(engine->mmio_base));
	else
		acthd = I915_READ(ACTHD);

	return acthd;
}

u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u64 bbaddr;

	if (INTEL_GEN(dev_priv) >= 8)
		bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
					  RING_BBADDR_UDW(engine->mmio_base));
	else
		bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));

	return bbaddr;
}

int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const u32 base = engine->mmio_base;
	const i915_reg_t mode = RING_MI_MODE(base);
	int err;

	if (INTEL_GEN(dev_priv) < 3)
		return -ENODEV;

	GEM_TRACE("%s\n", engine->name);

	I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));

	err = 0;
	if (__intel_wait_for_register_fw(dev_priv,
					 mode, MODE_IDLE, MODE_IDLE,
					 1000, 0,
					 NULL)) {
		GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name);
		err = -ETIMEDOUT;
	}

	/* A final mmio read to let GPU writes be hopefully flushed to memory */
	POSTING_READ_FW(mode);

	return err;
}

void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	GEM_TRACE("%s\n", engine->name);

	I915_WRITE_FW(RING_MI_MODE(engine->mmio_base),
		      _MASKED_BIT_DISABLE(STOP_RING));
}

const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
{
	switch (type) {
	case I915_CACHE_NONE: return " uncached";
	case I915_CACHE_LLC: return HAS_LLC(i915) ? " LLC" : " snooped";
	case I915_CACHE_L3_LLC: return " L3+LLC";
	case I915_CACHE_WT: return " WT";
	default: return "";
	}
}

u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
{
	const struct sseu_dev_info *sseu = &(INTEL_INFO(dev_priv)->sseu);
	u32 mcr_s_ss_select;
	u32 slice = fls(sseu->slice_mask);
	u32 subslice = fls(sseu->subslice_mask[slice]);

	if (INTEL_GEN(dev_priv) == 10)
		mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
				  GEN8_MCR_SUBSLICE(subslice);
	else if (INTEL_GEN(dev_priv) >= 11)
		mcr_s_ss_select = GEN11_MCR_SLICE(slice) |
				  GEN11_MCR_SUBSLICE(subslice);
	else
		mcr_s_ss_select = 0;

	return mcr_s_ss_select;
}

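/*
 * A note on the fls() selection above: fls() returns the 1-based index of
 * the most significant set bit, so for slice_mask = 0x3 it returns 2,
 * steering towards the highest enabled slice (and likewise the highest
 * enabled subslice within it).
 */
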
static inline uint32_t
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
		  int subslice, i915_reg_t reg)
{
	uint32_t mcr_slice_subslice_mask;
	uint32_t mcr_slice_subslice_select;
	uint32_t default_mcr_s_ss_select;
	uint32_t mcr;
	uint32_t ret;
	enum forcewake_domains fw_domains;

	if (INTEL_GEN(dev_priv) >= 11) {
		mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
					  GEN11_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN11_MCR_SLICE(slice) |
					    GEN11_MCR_SUBSLICE(subslice);
	} else {
		mcr_slice_subslice_mask = GEN8_MCR_SLICE_MASK |
					  GEN8_MCR_SUBSLICE_MASK;
		mcr_slice_subslice_select = GEN8_MCR_SLICE(slice) |
					    GEN8_MCR_SUBSLICE(subslice);
	}

	default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);

	fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
						    FW_REG_READ);
	fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
						     GEN8_MCR_SELECTOR,
						     FW_REG_READ | FW_REG_WRITE);

	spin_lock_irq(&dev_priv->uncore.lock);
	intel_uncore_forcewake_get__locked(dev_priv, fw_domains);

	mcr = I915_READ_FW(GEN8_MCR_SELECTOR);

	WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
		     default_mcr_s_ss_select);

	mcr &= ~mcr_slice_subslice_mask;
	mcr |= mcr_slice_subslice_select;
	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	ret = I915_READ_FW(reg);

	mcr &= ~mcr_slice_subslice_mask;
	mcr |= default_mcr_s_ss_select;

	I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);

	intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
	spin_unlock_irq(&dev_priv->uncore.lock);

	return ret;
}

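/*
 * Usage sketch (mirroring intel_engine_get_instdone() below): a per
 * slice/subslice register is sampled by steering reads through the MCR
 * selector while holding forcewake, e.g.:
 *
 *	val = read_subslice_reg(dev_priv, slice, subslice,
 *				GEN7_ROW_INSTDONE);
 *
 * The selector is restored to the default steering afterwards so that
 * later readers are not left sampling an arbitrary subslice.
 */
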
/* NB: please notice the memset */
void intel_engine_get_instdone(struct intel_engine_cs *engine,
			       struct intel_instdone *instdone)
{
	struct drm_i915_private *dev_priv = engine->i915;
	u32 mmio_base = engine->mmio_base;
	int slice;
	int subslice;

	memset(instdone, 0, sizeof(*instdone));

	switch (INTEL_GEN(dev_priv)) {
	default:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
			instdone->sampler[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_SAMPLER_INSTDONE);
			instdone->row[slice][subslice] =
				read_subslice_reg(dev_priv, slice, subslice,
						  GEN7_ROW_INSTDONE);
		}
		break;
	case 7:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id != RCS)
			break;

		instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
		instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
		instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);

		break;
	case 6:
	case 5:
	case 4:
		instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));

		if (engine->id == RCS)
			/* HACK: Using the wrong struct member */
			instdone->slice_common = I915_READ(GEN4_INSTDONE1);
		break;
	case 3:
	case 2:
		instdone->instdone = I915_READ(GEN2_INSTDONE);
		break;
	}
}

static bool ring_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;
	bool idle = true;

	/* If the whole device is asleep, the engine must be idle */
	if (!intel_runtime_pm_get_if_in_use(dev_priv))
		return true;

	/* First check that no commands are left in the ring */
	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
	    (I915_READ_TAIL(engine) & TAIL_ADDR))
		idle = false;

	/* No bit for gen2, so assume the CS parser is idle */
	if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
		idle = false;

	intel_runtime_pm_put(dev_priv);

	return idle;
}

/**
 * intel_engine_is_idle() - Report if the engine has finished processing all work
 * @engine: the intel_engine_cs
 *
 * Return true if there are no requests pending, nothing left to be submitted
 * to hardware, and the engine is idle.
 */
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
	struct drm_i915_private *dev_priv = engine->i915;

	/* More white lies, if wedged, hw state is inconsistent */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	/* Any inflight/incomplete requests? */
	if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
		return false;

	if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock))
		return true;

	/* Waiting to drain ELSP? */
	if (READ_ONCE(engine->execlists.active)) {
		struct tasklet_struct *t = &engine->execlists.tasklet;

		local_bh_disable();
		if (tasklet_trylock(t)) {
			/* Must wait for any GPU reset in progress. */
			if (__tasklet_is_enabled(t))
				t->func(t->data);
			tasklet_unlock(t);
		}
		local_bh_enable();

		/* Otherwise flush the tasklet if it was on another cpu */
		tasklet_unlock_wait(t);

		if (READ_ONCE(engine->execlists.active))
			return false;
	}

	/* ELSP is empty, but there are ready requests? E.g. after reset */
	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
		return false;

	/* Ring stopped? */
	if (!ring_is_idle(engine))
		return false;

	return true;
}

bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * If the driver is wedged, HW state may be very inconsistent and
	 * report that it is still busy, even though we have stopped using it.
	 */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return true;

	for_each_engine(engine, dev_priv, id) {
		if (!intel_engine_is_idle(engine))
			return false;
	}

	return true;
}

/**
 * intel_engine_has_kernel_context:
 * @engine: the engine
 *
 * Returns true if the last context to be executed on this engine (or, if the
 * engine is already idle, the last context that was executed) is the kernel
 * context (#i915.kernel_context).
 */
bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
{
	const struct intel_context *kernel_context =
		to_intel_context(engine->i915->kernel_context, engine);
	struct i915_request *rq;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	/*
	 * Check the last context seen by the engine. If active, it will be
	 * the last request that remains in the timeline. When idle, it is
	 * the last executed context as tracked by retirement.
	 */
	rq = __i915_gem_active_peek(&engine->timeline.last_request);
	if (rq)
		return rq->hw_context == kernel_context;
	else
		return engine->last_retired_context == kernel_context;
}

void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id)
		engine->set_default_submission(engine);
}

/**
 * intel_engines_sanitize: called after the GPU has lost power
 * @i915: the i915 device
 *
 * Anytime we reset the GPU, either with an explicit GPU reset or through a
 * PCI power cycle, the GPU loses state and we must reset our state tracking
 * to match. Note that calling intel_engines_sanitize() if the GPU has not
 * been reset results in much confusion!
 */
void intel_engines_sanitize(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	GEM_TRACE("\n");

	for_each_engine(engine, i915, id) {
		if (engine->reset.reset)
			engine->reset.reset(engine, NULL);
	}
}

/**
 * intel_engines_park: called when the GT is transitioning from busy->idle
 * @i915: the i915 device
 *
 * The GT is now idle and about to go to sleep (maybe never to wake again?).
 * Time for us to tidy and put away our toys (release resources back to the
 * system).
 */
void intel_engines_park(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		/* Flush the residual irq tasklets first. */
		intel_engine_disarm_breadcrumbs(engine);
		tasklet_kill(&engine->execlists.tasklet);

		/*
		 * We are committed now to parking the engines, make sure there
		 * will be no more interrupts arriving later and the engines
		 * are truly idle.
		 */
		if (wait_for(intel_engine_is_idle(engine), 10)) {
			struct drm_printer p = drm_debug_printer(__func__);

			dev_err(i915->drm.dev,
				"%s is not idle before parking\n",
				engine->name);
			intel_engine_dump(engine, &p, NULL);
		}

		/* Must be reset upon idling, or we may miss the busy wakeup. */
		GEM_BUG_ON(engine->execlists.queue_priority != INT_MIN);

		if (engine->park)
			engine->park(engine);

		if (engine->pinned_default_state) {
			i915_gem_object_unpin_map(engine->default_state);
			engine->pinned_default_state = NULL;
		}

		i915_gem_batch_pool_fini(&engine->batch_pool);
		engine->execlists.no_priolist = false;
	}
}

/**
 * intel_engines_unpark: called when the GT is transitioning from idle->busy
 * @i915: the i915 device
 *
 * The GT was idle and now about to fire up with some new user requests.
 */
void intel_engines_unpark(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		void *map;

		/* Pin the default state for fast resets from atomic context. */
		map = NULL;
		if (engine->default_state)
			map = i915_gem_object_pin_map(engine->default_state,
						      I915_MAP_WB);
		if (!IS_ERR_OR_NULL(map))
			engine->pinned_default_state = map;

		if (engine->unpark)
			engine->unpark(engine);

		intel_engine_init_hangcheck(engine);
	}
}

/**
 * intel_engine_lost_context: called when the GPU is reset into unknown state
 * @engine: the engine
 *
 * We have either reset the GPU, or are otherwise about to lose state tracking
 * of the current GPU logical state (e.g. suspend). On next use, it is
 * therefore imperative that we make no presumptions about the current state
 * and load from scratch.
 */
void intel_engine_lost_context(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	lockdep_assert_held(&engine->i915->drm.struct_mutex);

	ce = fetch_and_zero(&engine->last_retired_context);
	if (ce)
		intel_context_unpin(ce);
}

bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
{
	switch (INTEL_GEN(engine->i915)) {
	case 2:
		return false; /* uses physical not virtual addresses */
	case 3:
		/* maybe only uses physical not virtual addresses */
		return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915));
	case 6:
		return engine->class != VIDEO_DECODE_CLASS; /* b0rked */
	default:
		return true;
	}
}

unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	unsigned int which;

	which = 0;
	for_each_engine(engine, i915, id)
		if (engine->default_state)
			which |= BIT(engine->uabi_class);

	return which;
}

static int print_sched_attr(struct drm_i915_private *i915,
			    const struct i915_sched_attr *attr,
			    char *buf, int x, int len)
{
	if (attr->priority == I915_PRIORITY_INVALID)
		return x;

	x += snprintf(buf + x, len - x,
		      " prio=%d", attr->priority);

	return x;
}

static void print_request(struct drm_printer *m,
			  struct i915_request *rq,
			  const char *prefix)
{
	const char *name = rq->fence.ops->get_timeline_name(&rq->fence);
	char buf[80] = "";
	int x = 0;

	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));

	drm_printf(m, "%s%x%s [%llx:%x]%s @ %dms: %s\n",
		   prefix,
		   rq->global_seqno,
		   i915_request_completed(rq) ? "!" : "",
		   rq->fence.context, rq->fence.seqno,
		   buf,
		   jiffies_to_msecs(jiffies - rq->emitted_jiffies),
		   name);
}

static void hexdump(struct drm_printer *m, const void *buf, size_t len)
{
	const size_t rowsize = 8 * sizeof(u32);
	const void *prev = NULL;
	bool skip = false;
	size_t pos;

	for (pos = 0; pos < len; pos += rowsize) {
		char line[128];

		if (prev && !memcmp(prev, buf + pos, rowsize)) {
			if (!skip) {
				drm_printf(m, "*\n");
				skip = true;
			}
			continue;
		}

		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
						rowsize, sizeof(u32),
						line, sizeof(line),
						false) >= sizeof(line));
		drm_printf(m, "[%04zx] %s\n", pos, line);

		prev = buf + pos;
		skip = false;
	}
}

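/*
 * With rowsize = 32 and groupsize = sizeof(u32), each emitted line looks
 * roughly like (values illustrative):
 *
 *	[0040] 00000001 00000000 deadbeef 00000000 ...
 *
 * and runs of identical rows are collapsed into a single "*" line, in the
 * style of hexdump(1).
 */
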
static void intel_engine_print_registers(const struct intel_engine_cs *engine,
					 struct drm_printer *m)
{
	struct drm_i915_private *dev_priv = engine->i915;
	const struct intel_engine_execlists * const execlists =
		&engine->execlists;
	u64 addr;

	if (engine->id == RCS && IS_GEN(dev_priv, 4, 7))
		drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
	drm_printf(m, "\tRING_START: 0x%08x\n",
		   I915_READ(RING_START(engine->mmio_base)));
	drm_printf(m, "\tRING_HEAD:  0x%08x\n",
		   I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
	drm_printf(m, "\tRING_TAIL:  0x%08x\n",
		   I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
	drm_printf(m, "\tRING_CTL:   0x%08x%s\n",
		   I915_READ(RING_CTL(engine->mmio_base)),
		   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
	if (INTEL_GEN(engine->i915) > 2) {
		drm_printf(m, "\tRING_MODE:  0x%08x%s\n",
			   I915_READ(RING_MI_MODE(engine->mmio_base)),
			   I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
	}

	if (INTEL_GEN(dev_priv) >= 6) {
		drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
	}

	if (HAS_LEGACY_SEMAPHORES(dev_priv)) {
		drm_printf(m, "\tSYNC_0: 0x%08x\n",
			   I915_READ(RING_SYNC_0(engine->mmio_base)));
		drm_printf(m, "\tSYNC_1: 0x%08x\n",
			   I915_READ(RING_SYNC_1(engine->mmio_base)));
		if (HAS_VEBOX(dev_priv))
			drm_printf(m, "\tSYNC_2: 0x%08x\n",
				   I915_READ(RING_SYNC_2(engine->mmio_base)));
	}

	addr = intel_engine_get_active_head(engine);
	drm_printf(m, "\tACTHD:  0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	addr = intel_engine_get_last_batch_head(engine);
	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 8)
		addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
					RING_DMA_FADD_UDW(engine->mmio_base));
	else if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
	else
		addr = I915_READ(DMA_FADD_I8XX);
	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
		   upper_32_bits(addr), lower_32_bits(addr));
	if (INTEL_GEN(dev_priv) >= 4) {
		drm_printf(m, "\tIPEIR: 0x%08x\n",
			   I915_READ(RING_IPEIR(engine->mmio_base)));
		drm_printf(m, "\tIPEHR: 0x%08x\n",
			   I915_READ(RING_IPEHR(engine->mmio_base)));
	} else {
		drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
		drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
	}

	if (HAS_EXECLISTS(dev_priv)) {
		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
		unsigned int idx;
		u8 read, write;

		drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
			   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
			   I915_READ(RING_EXECLIST_STATUS_HI(engine)));

		read = execlists->csb_head;
		write = READ_ONCE(*execlists->csb_write);

		drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? %s (%s)\n",
			   read, write,
			   GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))),
			   yesno(test_bit(TASKLET_STATE_SCHED,
					  &engine->execlists.tasklet.state)),
			   enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
		if (read >= GEN8_CSB_ENTRIES)
			read = 0;
		if (write >= GEN8_CSB_ENTRIES)
			write = 0;
		if (read > write)
			write += GEN8_CSB_ENTRIES;
		while (read < write) {
			idx = ++read % GEN8_CSB_ENTRIES;
			drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n",
				   idx,
				   hws[idx * 2],
				   I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
				   hws[idx * 2 + 1],
				   I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
		}

		rcu_read_lock();
		for (idx = 0; idx < execlists_num_ports(execlists); idx++) {
			struct i915_request *rq;
			unsigned int count;

			rq = port_unpack(&execlists->port[idx], &count);
			if (rq) {
				char hdr[80];

				snprintf(hdr, sizeof(hdr),
					 "\t\tELSP[%d] count=%d, ring->start=%08x, rq: ",
					 idx, count,
					 i915_ggtt_offset(rq->ring->vma));
				print_request(m, rq, hdr);
			} else {
				drm_printf(m, "\t\tELSP[%d] idle\n", idx);
			}
		}
		drm_printf(m, "\t\tHW active? 0x%x\n", execlists->active);
		rcu_read_unlock();
	} else if (INTEL_GEN(dev_priv) > 6) {
		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE(engine)));
		drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
			   I915_READ(RING_PP_DIR_BASE_READ(engine)));
		drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
			   I915_READ(RING_PP_DIR_DCLV(engine)));
	}
}

static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
{
	void *ring;
	int size;

	drm_printf(m,
		   "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n",
		   rq->head, rq->postfix, rq->tail,
		   rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u,
		   rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u);

	size = rq->tail - rq->head;
	if (rq->tail < rq->head)
		size += rq->ring->size;

	ring = kmalloc(size, GFP_ATOMIC);
	if (ring) {
		const void *vaddr = rq->ring->vaddr;
		unsigned int head = rq->head;
		unsigned int len = 0;

		if (rq->tail < head) {
			len = rq->ring->size - head;
			memcpy(ring, vaddr + head, len);
			head = 0;
		}
		memcpy(ring + len, vaddr + head, size - len);

		hexdump(m, ring, size);
		kfree(ring);
	}
}

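/*
 * Worked example of the wrap handling above, with made-up offsets: for a
 * 0x1000 byte ring with head = 0x0ff0 and tail = 0x0010, size comes to
 * 0x0010 - 0x0ff0 + 0x1000 = 0x20 bytes, copied as 0x10 bytes from the
 * end of the ring followed by 0x10 bytes from the start.
 */
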
void intel_engine_dump(struct intel_engine_cs *engine,
		       struct drm_printer *m,
		       const char *header, ...)
{
	const int MAX_REQUESTS_TO_SHOW = 8;
	struct intel_breadcrumbs * const b = &engine->breadcrumbs;
	const struct intel_engine_execlists * const execlists = &engine->execlists;
	struct i915_gpu_error * const error = &engine->i915->gpu_error;
	struct i915_request *rq, *last;
	unsigned long flags;
	struct rb_node *rb;
	int count;

	if (header) {
		va_list ap;

		va_start(ap, header);
		drm_vprintf(m, header, &ap);
		va_end(ap);
	}

	if (i915_terminally_wedged(&engine->i915->gpu_error))
		drm_printf(m, "*** WEDGED ***\n");

	drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
		   intel_engine_get_seqno(engine),
		   intel_engine_last_submit(engine),
		   engine->hangcheck.seqno,
		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
	drm_printf(m, "\tReset count: %d (global %d)\n",
		   i915_reset_engine_count(error, engine),
		   i915_reset_count(error));

	rcu_read_lock();

	drm_printf(m, "\tRequests:\n");

	rq = list_first_entry(&engine->timeline.requests,
			      struct i915_request, link);
	if (&rq->link != &engine->timeline.requests)
		print_request(m, rq, "\t\tfirst ");

	rq = list_last_entry(&engine->timeline.requests,
			     struct i915_request, link);
	if (&rq->link != &engine->timeline.requests)
		print_request(m, rq, "\t\tlast ");

	rq = i915_gem_find_active_request(engine);
	if (rq) {
		print_request(m, rq, "\t\tactive ");

		drm_printf(m, "\t\tring->start: 0x%08x\n",
			   i915_ggtt_offset(rq->ring->vma));
		drm_printf(m, "\t\tring->head: 0x%08x\n",
			   rq->ring->head);
		drm_printf(m, "\t\tring->tail: 0x%08x\n",
			   rq->ring->tail);
		drm_printf(m, "\t\tring->emit: 0x%08x\n",
			   rq->ring->emit);
		drm_printf(m, "\t\tring->space: 0x%08x\n",
			   rq->ring->space);

		print_request_ring(m, rq);
	}

	rcu_read_unlock();

	if (intel_runtime_pm_get_if_in_use(engine->i915)) {
		intel_engine_print_registers(engine, m);
		intel_runtime_pm_put(engine->i915);
	} else {
		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
	}

	local_irq_save(flags);
	spin_lock(&engine->timeline.lock);

	last = NULL;
	count = 0;
	list_for_each_entry(rq, &engine->timeline.requests, link) {
		if (count++ < MAX_REQUESTS_TO_SHOW - 1)
			print_request(m, rq, "\t\tE ");
		else
			last = rq;
	}
	if (last) {
		if (count > MAX_REQUESTS_TO_SHOW) {
			drm_printf(m,
				   "\t\t...skipping %d executing requests...\n",
				   count - MAX_REQUESTS_TO_SHOW);
		}
		print_request(m, last, "\t\tE ");
	}

	last = NULL;
	count = 0;
	drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
		struct i915_priolist *p =
			rb_entry(rb, typeof(*p), node);

		list_for_each_entry(rq, &p->requests, sched.link) {
			if (count++ < MAX_REQUESTS_TO_SHOW - 1)
				print_request(m, rq, "\t\tQ ");
			else
				last = rq;
		}
	}
	if (last) {
		if (count > MAX_REQUESTS_TO_SHOW) {
			drm_printf(m,
				   "\t\t...skipping %d queued requests...\n",
				   count - MAX_REQUESTS_TO_SHOW);
		}
		print_request(m, last, "\t\tQ ");
	}

	spin_unlock(&engine->timeline.lock);

	spin_lock(&b->rb_lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = rb_entry(rb, typeof(*w), node);

		drm_printf(m, "\t%s [%d] waiting for %x\n",
			   w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock(&b->rb_lock);
	local_irq_restore(flags);

	drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s)\n",
		   engine->irq_posted,
		   yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
				  &engine->irq_posted)));

	drm_printf(m, "HWSP:\n");
	hexdump(m, engine->status_page.page_addr, PAGE_SIZE);

	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
}

static u8 user_class_map[] = {
	[I915_ENGINE_CLASS_RENDER] = RENDER_CLASS,
	[I915_ENGINE_CLASS_COPY] = COPY_ENGINE_CLASS,
	[I915_ENGINE_CLASS_VIDEO] = VIDEO_DECODE_CLASS,
	[I915_ENGINE_CLASS_VIDEO_ENHANCE] = VIDEO_ENHANCEMENT_CLASS,
};

struct intel_engine_cs *
intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
{
	if (class >= ARRAY_SIZE(user_class_map))
		return NULL;

	class = user_class_map[class];

	GEM_BUG_ON(class > MAX_ENGINE_CLASS);

	if (instance > MAX_ENGINE_INSTANCE)
		return NULL;

	return i915->engine_class[class][instance];
}

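/*
 * Example lookup (illustrative): the second video decode engine, as
 * addressed from the uabi, maps through user_class_map to our internal
 * class/instance array:
 *
 *	engine = intel_engine_lookup_user(i915, I915_ENGINE_CLASS_VIDEO, 1);
 *	// engine == i915->engine_class[VIDEO_DECODE_CLASS][1], or NULL
 *	// if no such engine exists on this device
 */
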
/**
 * intel_enable_engine_stats() - Enable engine busy tracking on engine
 * @engine: engine to enable stats collection
 *
 * Start collecting the engine busyness data for @engine.
 *
 * Returns 0 on success or a negative error code.
 */
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned long flags;
	int err = 0;

	if (!intel_engine_supports_stats(engine))
		return -ENODEV;

	spin_lock_irqsave(&engine->timeline.lock, flags);
	write_seqlock(&engine->stats.lock);

	if (unlikely(engine->stats.enabled == ~0)) {
		err = -EBUSY;
		goto unlock;
	}

	if (engine->stats.enabled++ == 0) {
		const struct execlist_port *port = execlists->port;
		unsigned int num_ports = execlists_num_ports(execlists);

		engine->stats.enabled_at = ktime_get();

		/* XXX submission method oblivious? */
		while (num_ports-- && port_isset(port)) {
			engine->stats.active++;
			port++;
		}

		if (engine->stats.active)
			engine->stats.start = engine->stats.enabled_at;
	}

unlock:
	write_sequnlock(&engine->stats.lock);
	spin_unlock_irqrestore(&engine->timeline.lock, flags);

	return err;
}

static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total = engine->stats.total;

	/*
	 * If the engine is executing something at the moment
	 * add it to the total.
	 */
	if (engine->stats.active)
		total = ktime_add(total,
				  ktime_sub(ktime_get(), engine->stats.start));

	return total;
}

/**
 * intel_engine_get_busy_time() - Return current accumulated engine busyness
 * @engine: engine to report on
 *
 * Returns accumulated time @engine was busy since engine stats were enabled.
 */
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqbegin(&engine->stats.lock);
		total = __intel_engine_get_busy_time(engine);
	} while (read_seqretry(&engine->stats.lock, seq));

	return total;
}

/**
 * intel_disable_engine_stats() - Disable engine busy tracking on engine
 * @engine: engine to disable stats collection
 *
 * Stops collecting the engine busyness data for @engine.
 */
void intel_disable_engine_stats(struct intel_engine_cs *engine)
{
	unsigned long flags;

	if (!intel_engine_supports_stats(engine))
		return;

	write_seqlock_irqsave(&engine->stats.lock, flags);
	WARN_ON_ONCE(engine->stats.enabled == 0);
	if (--engine->stats.enabled == 0) {
		engine->stats.total = __intel_engine_get_busy_time(engine);
		engine->stats.active = 0;
	}
	write_sequnlock_irqrestore(&engine->stats.lock, flags);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#include "selftests/intel_engine_cs.c"
#endif