drm/i915: Drop the CONTEXT_CLONE API (v2)
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index de12487..7dd7afc 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
 #include "intel_engine_stats.h"
 #include "intel_execlists_submission.h"
 #include "intel_gt.h"
+#include "intel_gt_irq.h"
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
 #include "intel_lrc.h"
@@ -272,11 +273,11 @@ static int effective_prio(const struct i915_request *rq)
        return prio;
 }
 
-static int queue_prio(const struct intel_engine_execlists *execlists)
+static int queue_prio(const struct i915_sched_engine *sched_engine)
 {
        struct rb_node *rb;
 
-       rb = rb_first_cached(&execlists->queue);
+       rb = rb_first_cached(&sched_engine->queue);
        if (!rb)
                return INT_MIN;
 
@@ -317,14 +318,14 @@ static bool need_preempt(const struct intel_engine_cs *engine,
         * to preserve FIFO ordering of dependencies.
         */
        last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
-       if (engine->execlists.queue_priority_hint <= last_prio)
+       if (engine->sched_engine->queue_priority_hint <= last_prio)
                return false;
 
        /*
         * Check against the first request in ELSP[1], it will, thanks to the
         * power of PI, be the highest priority of that context.
         */
-       if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
+       if (!list_is_last(&rq->sched.link, &engine->sched_engine->requests) &&
            rq_prio(list_next_entry(rq, sched.link)) > last_prio)
                return true;
 
@@ -339,7 +340,7 @@ static bool need_preempt(const struct intel_engine_cs *engine,
         * context, its priority would not exceed ELSP[0] aka last_prio.
         */
        return max(virtual_prio(&engine->execlists),
-                  queue_prio(&engine->execlists)) > last_prio;
+                  queue_prio(engine->sched_engine)) > last_prio;
 }
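
A side note on the hunk above: queue_prio() now peeks at the head of the sched_engine's priority queue instead of the execlists queue, and need_preempt() compares that peek against the running request's effective priority. Below is an illustrative, self-contained sketch of that decision using toy types (toy_priolist, toy_sched_engine are stand-ins, not the kernel's rb_root_cached machinery):

/*
 * Sketch only: a priority-sorted queue where the head is the highest
 * priority, mirroring what queue_prio()/need_preempt() compute.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_priolist {
        int priority;
        struct toy_priolist *next;      /* kept sorted, highest priority first */
};

struct toy_sched_engine {
        struct toy_priolist *queue;
};

static int toy_queue_prio(const struct toy_sched_engine *se)
{
        return se->queue ? se->queue->priority : INT_MIN;       /* empty queue never preempts */
}

static bool toy_need_preempt(const struct toy_sched_engine *se, int last_prio)
{
        return toy_queue_prio(se) > last_prio;
}

int main(void)
{
        struct toy_priolist head = { .priority = 2, .next = NULL };
        struct toy_sched_engine se = { .queue = &head };

        printf("%d\n", toy_need_preempt(&se, 0));       /* 1: queued work outranks ELSP[0] */
        printf("%d\n", toy_need_preempt(&se, 4));       /* 0: running work already higher */
        return 0;
}
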
 
 __maybe_unused static bool
@@ -366,10 +367,10 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
        struct list_head *pl;
        int prio = I915_PRIORITY_INVALID;
 
-       lockdep_assert_held(&engine->active.lock);
+       lockdep_assert_held(&engine->sched_engine->lock);
 
        list_for_each_entry_safe_reverse(rq, rn,
-                                        &engine->active.requests,
+                                        &engine->sched_engine->requests,
                                         sched.link) {
                if (__i915_request_is_complete(rq)) {
                        list_del_init(&rq->sched.link);
@@ -381,9 +382,10 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
                GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
                if (rq_prio(rq) != prio) {
                        prio = rq_prio(rq);
-                       pl = i915_sched_lookup_priolist(engine, prio);
+                       pl = i915_sched_lookup_priolist(engine->sched_engine,
+                                                       prio);
                }
-               GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+               GEM_BUG_ON(i915_sched_engine_is_empty(engine->sched_engine));
 
                list_move(&rq->sched.link, pl);
                set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
@@ -533,13 +535,13 @@ resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
 {
        struct intel_engine_cs *engine = rq->engine;
 
-       spin_lock_irq(&engine->active.lock);
+       spin_lock_irq(&engine->sched_engine->lock);
 
        clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
        WRITE_ONCE(rq->engine, &ve->base);
        ve->base.submit_request(rq);
 
-       spin_unlock_irq(&engine->active.lock);
+       spin_unlock_irq(&engine->sched_engine->lock);
 }
 
 static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
@@ -568,7 +570,7 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
                resubmit_virtual_request(rq, ve);
 
        if (READ_ONCE(ve->request))
-               tasklet_hi_schedule(&ve->base.execlists.tasklet);
+               tasklet_hi_schedule(&ve->base.sched_engine->tasklet);
 }
 
 static void __execlists_schedule_out(struct i915_request * const rq,
@@ -578,7 +580,7 @@ static void __execlists_schedule_out(struct i915_request * const rq,
        unsigned int ccid;
 
        /*
-        * NB process_csb() is not under the engine->active.lock and hence
+        * NB process_csb() is not under the engine->sched_engine->lock and hence
         * schedule_out can race with schedule_in meaning that we should
         * refrain from doing non-trivial work here.
         */
@@ -737,9 +739,9 @@ trace_ports(const struct intel_engine_execlists *execlists,
 }
 
 static bool
-reset_in_progress(const struct intel_engine_execlists *execlists)
+reset_in_progress(const struct intel_engine_cs *engine)
 {
-       return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+       return unlikely(!__tasklet_is_enabled(&engine->sched_engine->tasklet));
 }
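
reset_in_progress() now keys off the sched_engine tasklet rather than the execlists tasklet, but the underlying idea is unchanged: the reset path disables the submission tasklet, so a disabled tasklet doubles as the reset-in-progress signal. A minimal sketch of that pattern with a toy disable counter (toy_tasklet is a stand-in, not the kernel tasklet API):

#include <stdbool.h>
#include <stdio.h>

struct toy_tasklet {
        int disable_count;      /* 0 == enabled */
};

static void toy_tasklet_disable(struct toy_tasklet *t) { t->disable_count++; }
static void toy_tasklet_enable(struct toy_tasklet *t)  { t->disable_count--; }

static bool toy_reset_in_progress(const struct toy_tasklet *t)
{
        return t->disable_count != 0;   /* disabled while a reset is running */
}

int main(void)
{
        struct toy_tasklet t = { 0 };

        toy_tasklet_disable(&t);        /* as in execlists_reset_prepare() */
        printf("reset_in_progress=%d\n", toy_reset_in_progress(&t));
        toy_tasklet_enable(&t);         /* as in execlists_reset_finish() */
        printf("reset_in_progress=%d\n", toy_reset_in_progress(&t));
        return 0;
}
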
 
 static __maybe_unused noinline bool
@@ -755,7 +757,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
        trace_ports(execlists, msg, execlists->pending);
 
        /* We may be messing around with the lists during reset, lalala */
-       if (reset_in_progress(execlists))
+       if (reset_in_progress(engine))
                return true;
 
        if (!execlists->pending[0]) {
@@ -1095,7 +1097,8 @@ static void defer_active(struct intel_engine_cs *engine)
        if (!rq)
                return;
 
-       defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+       defer_request(rq, i915_sched_lookup_priolist(engine->sched_engine,
+                                                    rq_prio(rq)));
 }
 
 static bool
@@ -1132,13 +1135,14 @@ static bool needs_timeslice(const struct intel_engine_cs *engine,
                return false;
 
        /* If ELSP[1] is occupied, always check to see if worth slicing */
-       if (!list_is_last_rcu(&rq->sched.link, &engine->active.requests)) {
+       if (!list_is_last_rcu(&rq->sched.link,
+                             &engine->sched_engine->requests)) {
                ENGINE_TRACE(engine, "timeslice required for second inflight context\n");
                return true;
        }
 
        /* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */
-       if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)) {
+       if (!i915_sched_engine_is_empty(engine->sched_engine)) {
                ENGINE_TRACE(engine, "timeslice required for queue\n");
                return true;
        }
@@ -1186,7 +1190,7 @@ static void start_timeslice(struct intel_engine_cs *engine)
                         * its timeslice, so recheck.
                         */
                        if (!timer_pending(&el->timer))
-                               tasklet_hi_schedule(&el->tasklet);
+                               tasklet_hi_schedule(&engine->sched_engine->tasklet);
                        return;
                }
 
@@ -1235,6 +1239,7 @@ static bool completed(const struct i915_request *rq)
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
+       struct i915_sched_engine * const sched_engine = engine->sched_engine;
        struct i915_request **port = execlists->pending;
        struct i915_request ** const last_port = port + execlists->port_mask;
        struct i915_request *last, * const *active;
@@ -1264,7 +1269,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
         * and context switches) submission.
         */
 
-       spin_lock(&engine->active.lock);
+       spin_lock(&sched_engine->lock);
 
        /*
         * If the queue is higher priority than the last
@@ -1286,7 +1291,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                     last->fence.context,
                                     last->fence.seqno,
                                     last->sched.attr.priority,
-                                    execlists->queue_priority_hint);
+                                    sched_engine->queue_priority_hint);
                        record_preemption(execlists);
 
                        /*
@@ -1312,7 +1317,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                     yesno(timer_expired(&execlists->timer)),
                                     last->fence.context, last->fence.seqno,
                                     rq_prio(last),
-                                    execlists->queue_priority_hint,
+                                    sched_engine->queue_priority_hint,
                                     yesno(timeslice_yield(execlists, last)));
 
                        /*
@@ -1364,7 +1369,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                 * Even if ELSP[1] is occupied and not worthy
                                 * of timeslices, our queue might be.
                                 */
-                               spin_unlock(&engine->active.lock);
+                               spin_unlock(&sched_engine->lock);
                                return;
                        }
                }
@@ -1374,7 +1379,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
        while ((ve = first_virtual_engine(engine))) {
                struct i915_request *rq;
 
-               spin_lock(&ve->base.active.lock);
+               spin_lock(&ve->base.sched_engine->lock);
 
                rq = ve->request;
                if (unlikely(!virtual_matches(ve, rq, engine)))
@@ -1383,14 +1388,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                GEM_BUG_ON(rq->engine != &ve->base);
                GEM_BUG_ON(rq->context != &ve->context);
 
-               if (unlikely(rq_prio(rq) < queue_prio(execlists))) {
-                       spin_unlock(&ve->base.active.lock);
+               if (unlikely(rq_prio(rq) < queue_prio(sched_engine))) {
+                       spin_unlock(&ve->base.sched_engine->lock);
                        break;
                }
 
                if (last && !can_merge_rq(last, rq)) {
-                       spin_unlock(&ve->base.active.lock);
-                       spin_unlock(&engine->active.lock);
+                       spin_unlock(&ve->base.sched_engine->lock);
+                       spin_unlock(&engine->sched_engine->lock);
                        return; /* leave this for another sibling */
                }
 
@@ -1404,7 +1409,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                             yesno(engine != ve->siblings[0]));
 
                WRITE_ONCE(ve->request, NULL);
-               WRITE_ONCE(ve->base.execlists.queue_priority_hint, INT_MIN);
+               WRITE_ONCE(ve->base.sched_engine->queue_priority_hint, INT_MIN);
 
                rb = &ve->nodes[engine->id].rb;
                rb_erase_cached(rb, &execlists->virtual);
@@ -1436,7 +1441,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
                i915_request_put(rq);
 unlock:
-               spin_unlock(&ve->base.active.lock);
+               spin_unlock(&ve->base.sched_engine->lock);
 
                /*
                 * Hmm, we have a bunch of virtual engine requests,
@@ -1449,7 +1454,7 @@ unlock:
                        break;
        }
 
-       while ((rb = rb_first_cached(&execlists->queue))) {
+       while ((rb = rb_first_cached(&sched_engine->queue))) {
                struct i915_priolist *p = to_priolist(rb);
                struct i915_request *rq, *rn;
 
@@ -1528,7 +1533,7 @@ unlock:
                        }
                }
 
-               rb_erase_cached(&p->node, &execlists->queue);
+               rb_erase_cached(&p->node, &sched_engine->queue);
                i915_priolist_free(p);
        }
 done:
@@ -1550,8 +1555,9 @@ done:
         * request triggering preemption on the next dequeue (or subsequent
         * interrupt for secondary ports).
         */
-       execlists->queue_priority_hint = queue_prio(execlists);
-       spin_unlock(&engine->active.lock);
+       sched_engine->queue_priority_hint = queue_prio(sched_engine);
+       i915_sched_engine_reset_on_empty(sched_engine);
+       spin_unlock(&sched_engine->lock);
 
        /*
         * We can skip poking the HW if we ended up with exactly the same set
@@ -1766,9 +1772,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
         * access. Either we are inside the tasklet, or the tasklet is disabled
         * and we assume that is only inside the reset paths and so serialised.
         */
-       GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
-                  !reset_in_progress(execlists));
-       GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
+       GEM_BUG_ON(!tasklet_is_locked(&engine->sched_engine->tasklet) &&
+                  !reset_in_progress(engine));
 
        /*
         * Note that csb_write, csb_status may be either in HWSP or mmio.
@@ -1847,7 +1852,7 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
                ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
                             head, upper_32_bits(csb), lower_32_bits(csb));
 
-               if (INTEL_GEN(engine->i915) >= 12)
+               if (GRAPHICS_VER(engine->i915) >= 12)
                        promote = gen12_csb_parse(csb);
                else
                        promote = gen8_csb_parse(csb);
@@ -1979,7 +1984,8 @@ static void __execlists_hold(struct i915_request *rq)
                        __i915_request_unsubmit(rq);
 
                clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-               list_move_tail(&rq->sched.link, &rq->engine->active.hold);
+               list_move_tail(&rq->sched.link,
+                              &rq->engine->sched_engine->hold);
                i915_request_set_hold(rq);
                RQ_TRACE(rq, "on hold\n");
 
@@ -2016,7 +2022,7 @@ static bool execlists_hold(struct intel_engine_cs *engine,
        if (i915_request_on_hold(rq))
                return false;
 
-       spin_lock_irq(&engine->active.lock);
+       spin_lock_irq(&engine->sched_engine->lock);
 
        if (__i915_request_is_complete(rq)) { /* too late! */
                rq = NULL;
@@ -2032,10 +2038,10 @@ static bool execlists_hold(struct intel_engine_cs *engine,
        GEM_BUG_ON(i915_request_on_hold(rq));
        GEM_BUG_ON(rq->engine != engine);
        __execlists_hold(rq);
-       GEM_BUG_ON(list_empty(&engine->active.hold));
+       GEM_BUG_ON(list_empty(&engine->sched_engine->hold));
 
 unlock:
-       spin_unlock_irq(&engine->active.lock);
+       spin_unlock_irq(&engine->sched_engine->lock);
        return rq;
 }
 
@@ -2079,7 +2085,7 @@ static void __execlists_unhold(struct i915_request *rq)
 
                i915_request_clear_hold(rq);
                list_move_tail(&rq->sched.link,
-                              i915_sched_lookup_priolist(rq->engine,
+                              i915_sched_lookup_priolist(rq->engine->sched_engine,
                                                          rq_prio(rq)));
                set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 
@@ -2115,7 +2121,7 @@ static void __execlists_unhold(struct i915_request *rq)
 static void execlists_unhold(struct intel_engine_cs *engine,
                             struct i915_request *rq)
 {
-       spin_lock_irq(&engine->active.lock);
+       spin_lock_irq(&engine->sched_engine->lock);
 
        /*
         * Move this request back to the priority queue, and all of its
@@ -2123,12 +2129,12 @@ static void execlists_unhold(struct intel_engine_cs *engine,
         */
        __execlists_unhold(rq);
 
-       if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
-               engine->execlists.queue_priority_hint = rq_prio(rq);
-               tasklet_hi_schedule(&engine->execlists.tasklet);
+       if (rq_prio(rq) > engine->sched_engine->queue_priority_hint) {
+               engine->sched_engine->queue_priority_hint = rq_prio(rq);
+               tasklet_hi_schedule(&engine->sched_engine->tasklet);
        }
 
-       spin_unlock_irq(&engine->active.lock);
+       spin_unlock_irq(&engine->sched_engine->lock);
 }
 
 struct execlists_capture {
@@ -2258,13 +2264,13 @@ static void execlists_capture(struct intel_engine_cs *engine)
        if (!cap)
                return;
 
-       spin_lock_irq(&engine->active.lock);
+       spin_lock_irq(&engine->sched_engine->lock);
        cap->rq = active_context(engine, active_ccid(engine));
        if (cap->rq) {
                cap->rq = active_request(cap->rq->context->timeline, cap->rq);
                cap->rq = i915_request_get_rcu(cap->rq);
        }
-       spin_unlock_irq(&engine->active.lock);
+       spin_unlock_irq(&engine->sched_engine->lock);
        if (!cap->rq)
                goto err_free;
 
@@ -2316,13 +2322,13 @@ static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
        ENGINE_TRACE(engine, "reset for %s\n", msg);
 
        /* Mark this tasklet as disabled to avoid waiting for it to complete */
-       tasklet_disable_nosync(&engine->execlists.tasklet);
+       tasklet_disable_nosync(&engine->sched_engine->tasklet);
 
        ring_set_paused(engine, 1); /* Freeze the current request in place */
        execlists_capture(engine);
        intel_engine_reset(engine, msg);
 
-       tasklet_enable(&engine->execlists.tasklet);
+       tasklet_enable(&engine->sched_engine->tasklet);
        clear_and_wake_up_bit(bit, lock);
 }
 
@@ -2345,8 +2351,9 @@ static bool preempt_timeout(const struct intel_engine_cs *const engine)
  */
 static void execlists_submission_tasklet(struct tasklet_struct *t)
 {
-       struct intel_engine_cs * const engine =
-               from_tasklet(engine, t, execlists.tasklet);
+       struct i915_sched_engine *sched_engine =
+               from_tasklet(sched_engine, t, tasklet);
+       struct intel_engine_cs * const engine = sched_engine->private_data;
        struct i915_request *post[2 * EXECLIST_MAX_PORTS];
        struct i915_request **inactive;
 
@@ -2385,10 +2392,52 @@ static void execlists_submission_tasklet(struct tasklet_struct *t)
        rcu_read_unlock();
 }
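
The tasklet callback above now recovers its i915_sched_engine with from_tasklet() (a container_of() wrapper) and reaches the engine through the opaque private_data back-pointer, since the tasklet no longer lives inside intel_engine_execlists. A stand-alone illustration of that recovery pattern, with toy_* names standing in for the real structures:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_tasklet {
        void (*callback)(struct toy_tasklet *t);
};

struct toy_sched_engine {
        struct toy_tasklet tasklet;     /* embedded, like sched_engine->tasklet */
        void *private_data;             /* back-pointer to the backend engine */
};

struct toy_engine {
        const char *name;
};

static void submission_cb(struct toy_tasklet *t)
{
        /* recover the containing scheduler, then the engine behind it */
        struct toy_sched_engine *se =
                container_of(t, struct toy_sched_engine, tasklet);
        struct toy_engine *engine = se->private_data;

        printf("tasklet ran for engine %s\n", engine->name);
}

int main(void)
{
        struct toy_engine engine = { .name = "rcs0" };
        struct toy_sched_engine se = {
                .tasklet = { .callback = submission_cb },
                .private_data = &engine,
        };

        se.tasklet.callback(&se.tasklet);       /* what a scheduled tasklet would run */
        return 0;
}
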
 
+static void execlists_irq_handler(struct intel_engine_cs *engine, u16 iir)
+{
+       bool tasklet = false;
+
+       if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
+               u32 eir;
+
+               /* Upper 16b are the enabling mask, rsvd for internal errors */
+               eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0);
+               ENGINE_TRACE(engine, "CS error: %x\n", eir);
+
+               /* Disable the error interrupt until after the reset */
+               if (likely(eir)) {
+                       ENGINE_WRITE(engine, RING_EMR, ~0u);
+                       ENGINE_WRITE(engine, RING_EIR, eir);
+                       WRITE_ONCE(engine->execlists.error_interrupt, eir);
+                       tasklet = true;
+               }
+       }
+
+       if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
+               WRITE_ONCE(engine->execlists.yield,
+                          ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI));
+               ENGINE_TRACE(engine, "semaphore yield: %08x\n",
+                            engine->execlists.yield);
+               if (del_timer(&engine->execlists.timer))
+                       tasklet = true;
+       }
+
+       if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
+               tasklet = true;
+
+       if (iir & GT_RENDER_USER_INTERRUPT)
+               intel_engine_signal_breadcrumbs(engine);
+
+       if (tasklet)
+               tasklet_hi_schedule(&engine->sched_engine->tasklet);
+}
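
The new execlists_irq_handler() coalesces several interrupt causes into one deferred kick: error and context-switch events, and semaphore yields that cancel a pending timeslice timer, all funnel into a single flag, so one tasklet_hi_schedule() call services them together, while user interrupts only signal breadcrumbs. A toy model of that coalescing (the CAUSE_* constants are illustrative, not the GT_* interrupt bits, and the semaphore case is simplified):

#include <stdbool.h>
#include <stdio.h>

#define CAUSE_CS_ERROR          (1u << 0)
#define CAUSE_SEMAPHORE         (1u << 1)
#define CAUSE_CTX_SWITCH        (1u << 2)
#define CAUSE_USER              (1u << 3)

static void kick_tasklet(void)        { puts("schedule submission tasklet"); }
static void signal_breadcrumbs(void)  { puts("signal breadcrumbs"); }

static void toy_irq_handler(unsigned int iir)
{
        bool tasklet = false;

        if (iir & CAUSE_CS_ERROR)
                tasklet = true;         /* error: reset handled from the tasklet */
        if (iir & CAUSE_SEMAPHORE)
                tasklet = true;         /* yield: re-evaluate submission (simplified) */
        if (iir & CAUSE_CTX_SWITCH)
                tasklet = true;         /* CSB events to process */
        if (iir & CAUSE_USER)
                signal_breadcrumbs();   /* completion: no tasklet needed */

        if (tasklet)
                kick_tasklet();         /* one kick, however many causes */
}

int main(void)
{
        toy_irq_handler(CAUSE_CTX_SWITCH | CAUSE_USER);
        return 0;
}
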
+
 static void __execlists_kick(struct intel_engine_execlists *execlists)
 {
+       struct intel_engine_cs *engine =
+               container_of(execlists, typeof(*engine), execlists);
+
        /* Kick the tasklet for some interrupt coalescing and reset handling */
-       tasklet_hi_schedule(&execlists->tasklet);
+       tasklet_hi_schedule(&engine->sched_engine->tasklet);
 }
 
 #define execlists_kick(t, member) \
@@ -2409,19 +2458,20 @@ static void queue_request(struct intel_engine_cs *engine,
 {
        GEM_BUG_ON(!list_empty(&rq->sched.link));
        list_add_tail(&rq->sched.link,
-                     i915_sched_lookup_priolist(engine, rq_prio(rq)));
+                     i915_sched_lookup_priolist(engine->sched_engine,
+                                                rq_prio(rq)));
        set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 }
 
 static bool submit_queue(struct intel_engine_cs *engine,
                         const struct i915_request *rq)
 {
-       struct intel_engine_execlists *execlists = &engine->execlists;
+       struct i915_sched_engine *sched_engine = engine->sched_engine;
 
-       if (rq_prio(rq) <= execlists->queue_priority_hint)
+       if (rq_prio(rq) <= sched_engine->queue_priority_hint)
                return false;
 
-       execlists->queue_priority_hint = rq_prio(rq);
+       sched_engine->queue_priority_hint = rq_prio(rq);
        return true;
 }
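
submit_queue() keeps the same gating logic as before, only against sched_engine->queue_priority_hint: the hint caches the highest pending priority, and a newly queued request only triggers a tasklet kick when it raises that hint. A small self-contained model of the gate (toy_sched_engine is an illustrative stand-in):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_sched_engine {
        int queue_priority_hint;        /* INT_MIN when empty/idle */
};

static bool toy_submit_queue(struct toy_sched_engine *se, int prio)
{
        if (prio <= se->queue_priority_hint)
                return false;           /* tasklet already has work at >= prio */

        se->queue_priority_hint = prio;
        return true;                    /* caller kicks the tasklet */
}

int main(void)
{
        struct toy_sched_engine se = { .queue_priority_hint = INT_MIN };

        printf("kick=%d\n", toy_submit_queue(&se, 0));          /* 1: first request */
        printf("kick=%d\n", toy_submit_queue(&se, -10));        /* 0: lower priority */
        printf("kick=%d\n", toy_submit_queue(&se, 100));        /* 1: preemption candidate */
        return 0;
}
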
 
@@ -2429,7 +2479,7 @@ static bool ancestor_on_hold(const struct intel_engine_cs *engine,
                             const struct i915_request *rq)
 {
        GEM_BUG_ON(i915_request_on_hold(rq));
-       return !list_empty(&engine->active.hold) && hold_request(rq);
+       return !list_empty(&engine->sched_engine->hold) && hold_request(rq);
 }
 
 static void execlists_submit_request(struct i915_request *request)
@@ -2438,23 +2488,24 @@ static void execlists_submit_request(struct i915_request *request)
        unsigned long flags;
 
        /* Will be called from irq-context when using foreign fences. */
-       spin_lock_irqsave(&engine->active.lock, flags);
+       spin_lock_irqsave(&engine->sched_engine->lock, flags);
 
        if (unlikely(ancestor_on_hold(engine, request))) {
                RQ_TRACE(request, "ancestor on hold\n");
-               list_add_tail(&request->sched.link, &engine->active.hold);
+               list_add_tail(&request->sched.link,
+                             &engine->sched_engine->hold);
                i915_request_set_hold(request);
        } else {
                queue_request(engine, request);
 
-               GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+               GEM_BUG_ON(i915_sched_engine_is_empty(engine->sched_engine));
                GEM_BUG_ON(list_empty(&request->sched.link));
 
                if (submit_queue(engine, request))
                        __execlists_kick(&engine->execlists);
        }
 
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 }
 
 static int
@@ -2733,7 +2784,7 @@ static void enable_execlists(struct intel_engine_cs *engine)
 
        intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
 
-       if (INTEL_GEN(engine->i915) >= 11)
+       if (GRAPHICS_VER(engine->i915) >= 11)
                mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
        else
                mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
@@ -2761,10 +2812,8 @@ static int execlists_resume(struct intel_engine_cs *engine)
 
 static void execlists_reset_prepare(struct intel_engine_cs *engine)
 {
-       struct intel_engine_execlists * const execlists = &engine->execlists;
-
        ENGINE_TRACE(engine, "depth<-%d\n",
-                    atomic_read(&execlists->tasklet.count));
+                    atomic_read(&engine->sched_engine->tasklet.count));
 
        /*
         * Prevent request submission to the hardware until we have
@@ -2775,8 +2824,8 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
         * Turning off the execlists->tasklet until the reset is over
         * prevents the race.
         */
-       __tasklet_disable_sync_once(&execlists->tasklet);
-       GEM_BUG_ON(!reset_in_progress(execlists));
+       __tasklet_disable_sync_once(&engine->sched_engine->tasklet);
+       GEM_BUG_ON(!reset_in_progress(engine));
 
        /*
         * We stop engines, otherwise we might get failed reset and a
@@ -2918,24 +2967,26 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 
        /* Push back any incomplete requests for replay after the reset. */
        rcu_read_lock();
-       spin_lock_irqsave(&engine->active.lock, flags);
+       spin_lock_irqsave(&engine->sched_engine->lock, flags);
        __unwind_incomplete_requests(engine);
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
        rcu_read_unlock();
 }
 
 static void nop_submission_tasklet(struct tasklet_struct *t)
 {
-       struct intel_engine_cs * const engine =
-               from_tasklet(engine, t, execlists.tasklet);
+       struct i915_sched_engine *sched_engine =
+               from_tasklet(sched_engine, t, tasklet);
+       struct intel_engine_cs * const engine = sched_engine->private_data;
 
        /* The driver is wedged; don't process any more events. */
-       WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
+       WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
 }
 
 static void execlists_reset_cancel(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
+       struct i915_sched_engine * const sched_engine = engine->sched_engine;
        struct i915_request *rq, *rn;
        struct rb_node *rb;
        unsigned long flags;
@@ -2959,15 +3010,15 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
        execlists_reset_csb(engine, true);
 
        rcu_read_lock();
-       spin_lock_irqsave(&engine->active.lock, flags);
+       spin_lock_irqsave(&engine->sched_engine->lock, flags);
 
        /* Mark all executing requests as skipped. */
-       list_for_each_entry(rq, &engine->active.requests, sched.link)
+       list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
                i915_request_put(i915_request_mark_eio(rq));
        intel_engine_signal_breadcrumbs(engine);
 
        /* Flush the queued requests to the timeline list (for retiring). */
-       while ((rb = rb_first_cached(&execlists->queue))) {
+       while ((rb = rb_first_cached(&sched_engine->queue))) {
                struct i915_priolist *p = to_priolist(rb);
 
                priolist_for_each_request_consume(rq, rn, p) {
@@ -2977,12 +3028,12 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
                        }
                }
 
-               rb_erase_cached(&p->node, &execlists->queue);
+               rb_erase_cached(&p->node, &sched_engine->queue);
                i915_priolist_free(p);
        }
 
        /* On-hold requests will be flushed to timeline upon their release */
-       list_for_each_entry(rq, &engine->active.hold, sched.link)
+       list_for_each_entry(rq, &sched_engine->hold, sched.link)
                i915_request_put(i915_request_mark_eio(rq));
 
        /* Cancel all attached virtual engines */
@@ -2993,7 +3044,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
                rb_erase_cached(rb, &execlists->virtual);
                RB_CLEAR_NODE(rb);
 
-               spin_lock(&ve->base.active.lock);
+               spin_lock(&ve->base.sched_engine->lock);
                rq = fetch_and_zero(&ve->request);
                if (rq) {
                        if (i915_request_mark_eio(rq)) {
@@ -3003,20 +3054,20 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
                        }
                        i915_request_put(rq);
 
-                       ve->base.execlists.queue_priority_hint = INT_MIN;
+                       ve->base.sched_engine->queue_priority_hint = INT_MIN;
                }
-               spin_unlock(&ve->base.active.lock);
+               spin_unlock(&ve->base.sched_engine->lock);
        }
 
        /* Remaining _unready_ requests will be nop'ed when submitted */
 
-       execlists->queue_priority_hint = INT_MIN;
-       execlists->queue = RB_ROOT_CACHED;
+       sched_engine->queue_priority_hint = INT_MIN;
+       sched_engine->queue = RB_ROOT_CACHED;
 
-       GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
-       execlists->tasklet.callback = nop_submission_tasklet;
+       GEM_BUG_ON(__tasklet_is_enabled(&engine->sched_engine->tasklet));
+       engine->sched_engine->tasklet.callback = nop_submission_tasklet;
 
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
        rcu_read_unlock();
 }
 
@@ -3034,14 +3085,14 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
         * reset as the next level of recovery, and as a final resort we
         * will declare the device wedged.
         */
-       GEM_BUG_ON(!reset_in_progress(execlists));
+       GEM_BUG_ON(!reset_in_progress(engine));
 
        /* And kick in case we missed a new request submission. */
-       if (__tasklet_enable(&execlists->tasklet))
+       if (__tasklet_enable(&engine->sched_engine->tasklet))
                __execlists_kick(execlists);
 
        ENGINE_TRACE(engine, "depth->%d\n",
-                    atomic_read(&execlists->tasklet.count));
+                    atomic_read(&engine->sched_engine->tasklet.count));
 }
 
 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
@@ -3064,41 +3115,69 @@ static void execlists_park(struct intel_engine_cs *engine)
 
 static bool can_preempt(struct intel_engine_cs *engine)
 {
-       if (INTEL_GEN(engine->i915) > 8)
+       if (GRAPHICS_VER(engine->i915) > 8)
                return true;
 
        /* GPGPU on bdw requires extra w/a; not implemented */
        return engine->class != RENDER_CLASS;
 }
 
-static void execlists_set_default_submission(struct intel_engine_cs *engine)
+static void kick_execlists(const struct i915_request *rq, int prio)
 {
-       engine->submit_request = execlists_submit_request;
-       engine->schedule = i915_schedule;
-       engine->execlists.tasklet.callback = execlists_submission_tasklet;
+       struct intel_engine_cs *engine = rq->engine;
+       struct i915_sched_engine *sched_engine = engine->sched_engine;
+       const struct i915_request *inflight;
 
-       engine->reset.prepare = execlists_reset_prepare;
-       engine->reset.rewind = execlists_reset_rewind;
-       engine->reset.cancel = execlists_reset_cancel;
-       engine->reset.finish = execlists_reset_finish;
+       /*
+        * We only need to kick the tasklet once for the high priority
+        * new context we add into the queue.
+        */
+       if (prio <= sched_engine->queue_priority_hint)
+               return;
 
-       engine->park = execlists_park;
-       engine->unpark = NULL;
+       rcu_read_lock();
 
-       engine->flags |= I915_ENGINE_SUPPORTS_STATS;
-       if (!intel_vgpu_active(engine->i915)) {
-               engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
-               if (can_preempt(engine)) {
-                       engine->flags |= I915_ENGINE_HAS_PREEMPTION;
-                       if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
-                               engine->flags |= I915_ENGINE_HAS_TIMESLICES;
-               }
-       }
+       /* Nothing currently active? We're overdue for a submission! */
+       inflight = execlists_active(&engine->execlists);
+       if (!inflight)
+               goto unlock;
 
-       if (intel_engine_has_preemption(engine))
-               engine->emit_bb_start = gen8_emit_bb_start;
-       else
-               engine->emit_bb_start = gen8_emit_bb_start_noarb;
+       /*
+        * If we are already the currently executing context, don't
+        * bother evaluating if we should preempt ourselves.
+        */
+       if (inflight->context == rq->context)
+               goto unlock;
+
+       ENGINE_TRACE(engine,
+                    "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
+                    prio,
+                    rq->fence.context, rq->fence.seqno,
+                    inflight->fence.context, inflight->fence.seqno,
+                    inflight->sched.attr.priority);
+
+       sched_engine->queue_priority_hint = prio;
+
+       /*
+        * Allow preemption of low -> normal -> high, but we do
+        * not allow low priority tasks to preempt other low priority
+        * tasks under the impression that latency for low priority
+        * tasks does not matter (as much as background throughput),
+        * so kiss.
+        */
+       if (prio >= max(I915_PRIORITY_NORMAL, rq_prio(inflight)))
+               tasklet_hi_schedule(&sched_engine->tasklet);
+
+unlock:
+       rcu_read_unlock();
+}
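
kick_execlists() above, installed as sched_engine->kick_backend, only schedules the tasklet once the bumped priority clears max(I915_PRIORITY_NORMAL, rq_prio(inflight)), so low-priority work never preempts other low-priority work. A sketch of just that threshold check, with PRIO_NORMAL standing in for I915_PRIORITY_NORMAL:

#include <stdbool.h>
#include <stdio.h>

#define PRIO_NORMAL     0

static bool toy_should_preempt(int new_prio, int inflight_prio)
{
        int threshold = inflight_prio > PRIO_NORMAL ? inflight_prio : PRIO_NORMAL;

        return new_prio >= threshold;
}

int main(void)
{
        printf("%d\n", toy_should_preempt(-5, -10));    /* 0: low vs low, wait it out */
        printf("%d\n", toy_should_preempt(0, -10));     /* 1: normal preempts low */
        printf("%d\n", toy_should_preempt(5, 10));      /* 0: below the inflight priority */
        return 0;
}
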
+
+static void execlists_set_default_submission(struct intel_engine_cs *engine)
+{
+       engine->submit_request = execlists_submit_request;
+       engine->sched_engine->schedule = i915_schedule;
+       engine->sched_engine->kick_backend = kick_execlists;
+       engine->sched_engine->tasklet.callback = execlists_submission_tasklet;
 }
 
 static void execlists_shutdown(struct intel_engine_cs *engine)
@@ -3106,7 +3185,7 @@ static void execlists_shutdown(struct intel_engine_cs *engine)
        /* Synchronise with residual timers and any softirq they raise */
        del_timer_sync(&engine->execlists.timer);
        del_timer_sync(&engine->execlists.preempt);
-       tasklet_kill(&engine->execlists.tasklet);
+       tasklet_kill(&engine->sched_engine->tasklet);
 }
 
 static void execlists_release(struct intel_engine_cs *engine)
@@ -3129,16 +3208,24 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
        engine->cops = &execlists_context_ops;
        engine->request_alloc = execlists_request_alloc;
 
+       engine->reset.prepare = execlists_reset_prepare;
+       engine->reset.rewind = execlists_reset_rewind;
+       engine->reset.cancel = execlists_reset_cancel;
+       engine->reset.finish = execlists_reset_finish;
+
+       engine->park = execlists_park;
+       engine->unpark = NULL;
+
        engine->emit_flush = gen8_emit_flush_xcs;
        engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
        engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
-       if (INTEL_GEN(engine->i915) >= 12) {
+       if (GRAPHICS_VER(engine->i915) >= 12) {
                engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
                engine->emit_flush = gen12_emit_flush_xcs;
        }
        engine->set_default_submission = execlists_set_default_submission;
 
-       if (INTEL_GEN(engine->i915) < 11) {
+       if (GRAPHICS_VER(engine->i915) < 11) {
                engine->irq_enable = gen8_logical_ring_enable_irq;
                engine->irq_disable = gen8_logical_ring_disable_irq;
        } else {
@@ -3149,13 +3236,29 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
                 * until a more refined solution exists.
                 */
        }
+       intel_engine_set_irq_handler(engine, execlists_irq_handler);
+
+       engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+       if (!intel_vgpu_active(engine->i915)) {
+               engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+               if (can_preempt(engine)) {
+                       engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+                       if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+                               engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+               }
+       }
+
+       if (intel_engine_has_preemption(engine))
+               engine->emit_bb_start = gen8_emit_bb_start;
+       else
+               engine->emit_bb_start = gen8_emit_bb_start_noarb;
 }
 
 static void logical_ring_default_irqs(struct intel_engine_cs *engine)
 {
        unsigned int shift = 0;
 
-       if (INTEL_GEN(engine->i915) < 11) {
+       if (GRAPHICS_VER(engine->i915) < 11) {
                const u8 irq_shifts[] = {
                        [RCS0]  = GEN8_RCS_IRQ_SHIFT,
                        [BCS0]  = GEN8_BCS_IRQ_SHIFT,
@@ -3175,7 +3278,7 @@ static void logical_ring_default_irqs(struct intel_engine_cs *engine)
 
 static void rcs_submission_override(struct intel_engine_cs *engine)
 {
-       switch (INTEL_GEN(engine->i915)) {
+       switch (GRAPHICS_VER(engine->i915)) {
        case 12:
                engine->emit_flush = gen12_emit_flush_rcs;
                engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs;
@@ -3198,7 +3301,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
        struct intel_uncore *uncore = engine->uncore;
        u32 base = engine->mmio_base;
 
-       tasklet_setup(&engine->execlists.tasklet, execlists_submission_tasklet);
+       tasklet_setup(&engine->sched_engine->tasklet, execlists_submission_tasklet);
        timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
        timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
 
@@ -3226,13 +3329,13 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
        execlists->csb_write =
                &engine->status_page.addr[intel_hws_csb_write_index(i915)];
 
-       if (INTEL_GEN(i915) < 11)
+       if (GRAPHICS_VER(i915) < 11)
                execlists->csb_size = GEN8_CSB_ENTRIES;
        else
                execlists->csb_size = GEN11_CSB_ENTRIES;
 
        engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
-       if (INTEL_GEN(engine->i915) >= 11) {
+       if (GRAPHICS_VER(engine->i915) >= 11) {
                execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
                execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
        }
@@ -3246,7 +3349,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 
 static struct list_head *virtual_queue(struct virtual_engine *ve)
 {
-       return &ve->base.execlists.default_priolist.requests;
+       return &ve->base.sched_engine->default_priolist.requests;
 }
 
 static void rcu_virtual_context_destroy(struct work_struct *wrk)
@@ -3261,7 +3364,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
        if (unlikely(ve->request)) {
                struct i915_request *old;
 
-               spin_lock_irq(&ve->base.active.lock);
+               spin_lock_irq(&ve->base.sched_engine->lock);
 
                old = fetch_and_zero(&ve->request);
                if (old) {
@@ -3270,7 +3373,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
                        i915_request_put(old);
                }
 
-               spin_unlock_irq(&ve->base.active.lock);
+               spin_unlock_irq(&ve->base.sched_engine->lock);
        }
 
        /*
@@ -3280,7 +3383,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
         * rbtrees as in the case it is running in parallel, it may reinsert
         * the rb_node into a sibling.
         */
-       tasklet_kill(&ve->base.execlists.tasklet);
+       tasklet_kill(&ve->base.sched_engine->tasklet);
 
        /* Decouple ourselves from the siblings, no more access allowed. */
        for (n = 0; n < ve->num_siblings; n++) {
@@ -3290,21 +3393,24 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
                if (RB_EMPTY_NODE(node))
                        continue;
 
-               spin_lock_irq(&sibling->active.lock);
+               spin_lock_irq(&sibling->sched_engine->lock);
 
-               /* Detachment is lazily performed in the execlists tasklet */
+               /* Detachment is lazily performed in the sched_engine->tasklet */
                if (!RB_EMPTY_NODE(node))
                        rb_erase_cached(node, &sibling->execlists.virtual);
 
-               spin_unlock_irq(&sibling->active.lock);
+               spin_unlock_irq(&sibling->sched_engine->lock);
        }
-       GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+       GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.sched_engine->tasklet));
        GEM_BUG_ON(!list_empty(virtual_queue(ve)));
 
        lrc_fini(&ve->context);
        intel_context_fini(&ve->context);
 
-       intel_breadcrumbs_free(ve->base.breadcrumbs);
+       if (ve->base.breadcrumbs)
+               intel_breadcrumbs_free(ve->base.breadcrumbs);
+       if (ve->base.sched_engine)
+               i915_sched_engine_put(ve->base.sched_engine);
        intel_engine_free_request_pool(&ve->base);
 
        kfree(ve->bonds);
@@ -3435,16 +3541,18 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
 
        ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
                     rq->fence.context, rq->fence.seqno,
-                    mask, ve->base.execlists.queue_priority_hint);
+                    mask, ve->base.sched_engine->queue_priority_hint);
 
        return mask;
 }
 
 static void virtual_submission_tasklet(struct tasklet_struct *t)
 {
+       struct i915_sched_engine *sched_engine =
+               from_tasklet(sched_engine, t, tasklet);
        struct virtual_engine * const ve =
-               from_tasklet(ve, t, base.execlists.tasklet);
-       const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
+               (struct virtual_engine *)sched_engine->private_data;
+       const int prio = READ_ONCE(sched_engine->queue_priority_hint);
        intel_engine_mask_t mask;
        unsigned int n;
 
@@ -3463,7 +3571,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
                if (!READ_ONCE(ve->request))
                        break; /* already handled by a sibling's tasklet */
 
-               spin_lock_irq(&sibling->active.lock);
+               spin_lock_irq(&sibling->sched_engine->lock);
 
                if (unlikely(!(mask & sibling->mask))) {
                        if (!RB_EMPTY_NODE(&node->rb)) {
@@ -3512,11 +3620,11 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
 submit_engine:
                GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
                node->prio = prio;
-               if (first && prio > sibling->execlists.queue_priority_hint)
-                       tasklet_hi_schedule(&sibling->execlists.tasklet);
+               if (first && prio > sibling->sched_engine->queue_priority_hint)
+                       tasklet_hi_schedule(&sibling->sched_engine->tasklet);
 
 unlock_engine:
-               spin_unlock_irq(&sibling->active.lock);
+               spin_unlock_irq(&sibling->sched_engine->lock);
 
                if (intel_context_inflight(&ve->context))
                        break;
@@ -3534,7 +3642,7 @@ static void virtual_submit_request(struct i915_request *rq)
 
        GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
 
-       spin_lock_irqsave(&ve->base.active.lock, flags);
+       spin_lock_irqsave(&ve->base.sched_engine->lock, flags);
 
        /* By the time we resubmit a request, it may be completed */
        if (__i915_request_is_complete(rq)) {
@@ -3548,16 +3656,16 @@ static void virtual_submit_request(struct i915_request *rq)
                i915_request_put(ve->request);
        }
 
-       ve->base.execlists.queue_priority_hint = rq_prio(rq);
+       ve->base.sched_engine->queue_priority_hint = rq_prio(rq);
        ve->request = i915_request_get(rq);
 
        GEM_BUG_ON(!list_empty(virtual_queue(ve)));
        list_move_tail(&rq->sched.link, virtual_queue(ve));
 
-       tasklet_hi_schedule(&ve->base.execlists.tasklet);
+       tasklet_hi_schedule(&ve->base.sched_engine->tasklet);
 
 unlock:
-       spin_unlock_irqrestore(&ve->base.active.lock, flags);
+       spin_unlock_irqrestore(&ve->base.sched_engine->lock, flags);
 }
 
 static struct ve_bond *
@@ -3641,19 +3749,25 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 
        snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
 
-       intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
        intel_engine_init_execlists(&ve->base);
 
+       ve->base.sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
+       if (!ve->base.sched_engine) {
+               err = -ENOMEM;
+               goto err_put;
+       }
+       ve->base.sched_engine->private_data = &ve->base;
+
        ve->base.cops = &virtual_context_ops;
        ve->base.request_alloc = execlists_request_alloc;
 
-       ve->base.schedule = i915_schedule;
+       ve->base.sched_engine->schedule = i915_schedule;
+       ve->base.sched_engine->kick_backend = kick_execlists;
        ve->base.submit_request = virtual_submit_request;
        ve->base.bond_execute = virtual_bond_execute;
 
        INIT_LIST_HEAD(virtual_queue(ve));
-       ve->base.execlists.queue_priority_hint = INT_MIN;
-       tasklet_setup(&ve->base.execlists.tasklet, virtual_submission_tasklet);
+       tasklet_setup(&ve->base.sched_engine->tasklet, virtual_submission_tasklet);
 
        intel_context_init(&ve->context, &ve->base);
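
The virtual engine now allocates its scheduler with i915_sched_engine_create(), stores a back-pointer in private_data, and (per the destroy path earlier in this diff) drops its reference with i915_sched_engine_put(). A minimal model of that create/put ownership, using a toy refcount rather than the kernel's kref:

#include <stdio.h>
#include <stdlib.h>

struct toy_sched_engine {
        int refcount;
        void *private_data;     /* back-pointer to the owning engine */
};

static struct toy_sched_engine *toy_sched_engine_create(void)
{
        struct toy_sched_engine *se = calloc(1, sizeof(*se));

        if (se)
                se->refcount = 1;       /* creator holds the initial reference */
        return se;
}

static void toy_sched_engine_put(struct toy_sched_engine *se)
{
        if (se && --se->refcount == 0) {
                printf("freeing sched_engine\n");
                free(se);
        }
}

int main(void)
{
        struct toy_sched_engine *se = toy_sched_engine_create();

        if (!se)
                return 1;       /* mirrors the -ENOMEM error path */

        se->private_data = "virtual engine";
        toy_sched_engine_put(se);       /* final put frees, as in the destroy worker */
        return 0;
}
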
 
@@ -3681,7 +3795,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
                 * layering if we handle cloning of the requests and
                 * submitting a copy into each backend.
                 */
-               if (sibling->execlists.tasklet.callback !=
+               if (sibling->sched_engine->tasklet.callback !=
                    execlists_submission_tasklet) {
                        err = -ENODEV;
                        goto err_put;
@@ -3736,34 +3850,6 @@ err_put:
        return ERR_PTR(err);
 }
 
-struct intel_context *
-intel_execlists_clone_virtual(struct intel_engine_cs *src)
-{
-       struct virtual_engine *se = to_virtual_engine(src);
-       struct intel_context *dst;
-
-       dst = intel_execlists_create_virtual(se->siblings,
-                                            se->num_siblings);
-       if (IS_ERR(dst))
-               return dst;
-
-       if (se->num_bonds) {
-               struct virtual_engine *de = to_virtual_engine(dst->engine);
-
-               de->bonds = kmemdup(se->bonds,
-                                   sizeof(*se->bonds) * se->num_bonds,
-                                   GFP_KERNEL);
-               if (!de->bonds) {
-                       intel_context_put(dst);
-                       return ERR_PTR(-ENOMEM);
-               }
-
-               de->num_bonds = se->num_bonds;
-       }
-
-       return dst;
-}
-
 int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
                                     const struct intel_engine_cs *master,
                                     const struct intel_engine_cs *sibling)
@@ -3809,16 +3895,17 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
                                   unsigned int max)
 {
        const struct intel_engine_execlists *execlists = &engine->execlists;
+       struct i915_sched_engine *sched_engine = engine->sched_engine;
        struct i915_request *rq, *last;
        unsigned long flags;
        unsigned int count;
        struct rb_node *rb;
 
-       spin_lock_irqsave(&engine->active.lock, flags);
+       spin_lock_irqsave(&sched_engine->lock, flags);
 
        last = NULL;
        count = 0;
-       list_for_each_entry(rq, &engine->active.requests, sched.link) {
+       list_for_each_entry(rq, &sched_engine->requests, sched.link) {
                if (count++ < max - 1)
                        show_request(m, rq, "\t\t", 0);
                else
@@ -3833,13 +3920,13 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
                show_request(m, last, "\t\t", 0);
        }
 
-       if (execlists->queue_priority_hint != INT_MIN)
+       if (sched_engine->queue_priority_hint != INT_MIN)
                drm_printf(m, "\t\tQueue priority hint: %d\n",
-                          READ_ONCE(execlists->queue_priority_hint));
+                          READ_ONCE(sched_engine->queue_priority_hint));
 
        last = NULL;
        count = 0;
-       for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+       for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
                struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
 
                priolist_for_each_request(rq, p) {
@@ -3881,14 +3968,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
                show_request(m, last, "\t\t", 0);
        }
 
-       spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine)
-{
-       return engine->set_default_submission ==
-              execlists_set_default_submission;
+       spin_unlock_irqrestore(&sched_engine->lock, flags);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)