drm/i915: Add kick_backend function to i915_sched_engine
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index efa638c..035b88f 100644
@@ -40,7 +40,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
        return rb_entry(rb, struct i915_priolist, node);
 }
 
-static void assert_priolists(struct intel_engine_execlists * const execlists)
+static void assert_priolists(struct i915_sched_engine * const sched_engine)
 {
        struct rb_node *rb;
        long last_prio;
@@ -48,11 +48,11 @@ static void assert_priolists(struct intel_engine_execlists * const execlists)
        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                return;
 
-       GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
-                  rb_first(&execlists->queue.rb_root));
+       GEM_BUG_ON(rb_first_cached(&sched_engine->queue) !=
+                  rb_first(&sched_engine->queue.rb_root));
 
        last_prio = INT_MAX;
-       for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+       for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
                const struct i915_priolist *p = to_priolist(rb);
 
                GEM_BUG_ON(p->priority > last_prio);
@@ -63,21 +63,21 @@ static void assert_priolists(struct intel_engine_execlists * const execlists)
 struct list_head *
 i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
 {
-       struct intel_engine_execlists * const execlists = &engine->execlists;
+       struct i915_sched_engine * const sched_engine = engine->sched_engine;
        struct i915_priolist *p;
        struct rb_node **parent, *rb;
        bool first = true;
 
-       lockdep_assert_held(&engine->active.lock);
-       assert_priolists(execlists);
+       lockdep_assert_held(&engine->sched_engine->lock);
+       assert_priolists(sched_engine);
 
-       if (unlikely(execlists->no_priolist))
+       if (unlikely(sched_engine->no_priolist))
                prio = I915_PRIORITY_NORMAL;
 
 find_priolist:
        /* most positive priority is scheduled first, equal priorities fifo */
        rb = NULL;
-       parent = &execlists->queue.rb_root.rb_node;
+       parent = &sched_engine->queue.rb_root.rb_node;
        while (*parent) {
                rb = *parent;
                p = to_priolist(rb);
@@ -92,7 +92,7 @@ find_priolist:
        }
 
        if (prio == I915_PRIORITY_NORMAL) {
-               p = &execlists->default_priolist;
+               p = &sched_engine->default_priolist;
        } else {
                p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
                /* Convert an allocation failure to a priority bump */
@@ -107,7 +107,7 @@ find_priolist:
                         * requests, so if userspace lied about their
                         * dependencies that reordering may be visible.
                         */
-                       execlists->no_priolist = true;
+                       sched_engine->no_priolist = true;
                        goto find_priolist;
                }
        }
@@ -116,7 +116,7 @@ find_priolist:
        INIT_LIST_HEAD(&p->requests);
 
        rb_link_node(&p->node, rb, parent);
-       rb_insert_color_cached(&p->node, &execlists->queue, first);
+       rb_insert_color_cached(&p->node, &sched_engine->queue, first);
 
        return &p->requests;
 }
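
i915_sched_lookup_priolist() hands back the request list for the given priority, with sched_engine->lock held by the caller. A minimal sketch of a backend-side caller, using the illustrative name queue_request() (not part of this diff):

static void queue_request(struct intel_engine_cs *engine,
			  struct i915_request *rq)
{
	/*
	 * Caller holds engine->sched_engine->lock, matching the
	 * lockdep_assert_held() in i915_sched_lookup_priolist().
	 */
	GEM_BUG_ON(!list_empty(&rq->sched.link));
	list_add_tail(&rq->sched.link,
		      i915_sched_lookup_priolist(engine,
						 rq->sched.attr.priority));
}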
@@ -147,9 +147,9 @@ sched_lock_engine(const struct i915_sched_node *node,
         * check that the rq still belongs to the newly locked engine.
         */
        while (locked != (engine = READ_ONCE(rq->engine))) {
-               spin_unlock(&locked->active.lock);
+               spin_unlock(&locked->sched_engine->lock);
                memset(cache, 0, sizeof(*cache));
-               spin_lock(&engine->active.lock);
+               spin_lock(&engine->sched_engine->lock);
                locked = engine;
        }
 
@@ -157,65 +157,6 @@ sched_lock_engine(const struct i915_sched_node *node,
        return locked;
 }
 
-static inline int rq_prio(const struct i915_request *rq)
-{
-       return rq->sched.attr.priority;
-}
-
-static inline bool need_preempt(int prio, int active)
-{
-       /*
-        * Allow preemption of low -> normal -> high, but we do
-        * not allow low priority tasks to preempt other low priority
-        * tasks under the impression that latency for low priority
-        * tasks does not matter (as much as background throughput),
-        * so kiss.
-        */
-       return prio >= max(I915_PRIORITY_NORMAL, active);
-}
-
-static void kick_submission(struct intel_engine_cs *engine,
-                           const struct i915_request *rq,
-                           int prio)
-{
-       const struct i915_request *inflight;
-
-       /*
-        * We only need to kick the tasklet once for the high priority
-        * new context we add into the queue.
-        */
-       if (prio <= engine->execlists.queue_priority_hint)
-               return;
-
-       rcu_read_lock();
-
-       /* Nothing currently active? We're overdue for a submission! */
-       inflight = execlists_active(&engine->execlists);
-       if (!inflight)
-               goto unlock;
-
-       /*
-        * If we are already the currently executing context, don't
-        * bother evaluating if we should preempt ourselves.
-        */
-       if (inflight->context == rq->context)
-               goto unlock;
-
-       ENGINE_TRACE(engine,
-                    "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
-                    prio,
-                    rq->fence.context, rq->fence.seqno,
-                    inflight->fence.context, inflight->fence.seqno,
-                    inflight->sched.attr.priority);
-
-       engine->execlists.queue_priority_hint = prio;
-       if (need_preempt(prio, rq_prio(inflight)))
-               tasklet_hi_schedule(&engine->execlists.tasklet);
-
-unlock:
-       rcu_read_unlock();
-}
-
 static void __i915_schedule(struct i915_sched_node *node,
                            const struct i915_sched_attr *attr)
 {
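
With rq_prio(), need_preempt() and kick_submission() gone from the common scheduler, the execlists-specific kicking logic is expected to move behind the new sched_engine->kick_backend() hook. A sketch of such a backend callback, reusing the logic deleted above (the kick_execlists name and its home in the execlists backend are assumptions, not shown by this hunk):

static void kick_execlists(const struct i915_request *rq, int prio)
{
	struct intel_engine_cs *engine = rq->engine;
	struct i915_sched_engine *sched_engine = engine->sched_engine;
	const struct i915_request *inflight;

	/* Only kick the tasklet once for the new high priority context. */
	if (prio <= sched_engine->queue_priority_hint)
		return;

	rcu_read_lock();

	/* Nothing currently active? We're overdue for a submission! */
	inflight = execlists_active(&engine->execlists);
	if (!inflight)
		goto unlock;

	/* Don't bother evaluating preemption against ourselves. */
	if (inflight->context == rq->context)
		goto unlock;

	sched_engine->queue_priority_hint = prio;
	if (prio >= max(I915_PRIORITY_NORMAL,
			inflight->sched.attr.priority))
		tasklet_hi_schedule(&engine->execlists.tasklet);

unlock:
	rcu_read_unlock();
}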
@@ -296,7 +237,7 @@ static void __i915_schedule(struct i915_sched_node *node,
 
        memset(&cache, 0, sizeof(cache));
        engine = node_to_request(node)->engine;
-       spin_lock(&engine->active.lock);
+       spin_lock(&engine->sched_engine->lock);
 
        /* Fifo and depth-first replacement ensure our deps execute before us */
        engine = sched_lock_engine(node, engine, &cache);
@@ -305,7 +246,7 @@ static void __i915_schedule(struct i915_sched_node *node,
 
                node = dep->signaler;
                engine = sched_lock_engine(node, engine, &cache);
-               lockdep_assert_held(&engine->active.lock);
+               lockdep_assert_held(&engine->sched_engine->lock);
 
                /* Recheck after acquiring the engine->timeline.lock */
                if (prio <= node->attr.priority || node_signaled(node))
@@ -335,10 +276,11 @@ static void __i915_schedule(struct i915_sched_node *node,
                }
 
                /* Defer (tasklet) submission until after all of our updates. */
-               kick_submission(engine, node_to_request(node), prio);
+               if (engine->sched_engine->kick_backend)
+                       engine->sched_engine->kick_backend(node_to_request(node), prio);
        }
 
-       spin_unlock(&engine->active.lock);
+       spin_unlock(&engine->sched_engine->lock);
 }
 
 void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
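
For reference, the fields this file now expects on the scheduling engine look roughly like the sketch below; the authoritative definition lives in the scheduler types header and may carry more backend state than shown here.

struct i915_sched_engine {
	struct kref ref;

	/* Protects the priority queue and the request/hold lists. */
	spinlock_t lock;

	struct list_head requests;
	struct list_head hold;

	/* Priority-sorted, cached rbtree of ready priolists. */
	struct rb_root_cached queue;
	struct i915_priolist default_priolist;
	int queue_priority_hint;
	bool no_priolist;

	/* Backend hook used by __i915_schedule() to (re)kick submission. */
	void (*kick_backend)(const struct i915_request *rq, int prio);
};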
@@ -489,6 +431,49 @@ void i915_request_show_with_schedule(struct drm_printer *m,
        rcu_read_unlock();
 }
 
+void i915_sched_engine_free(struct kref *kref)
+{
+       struct i915_sched_engine *sched_engine =
+               container_of(kref, typeof(*sched_engine), ref);
+
+       kfree(sched_engine);
+}
+
+struct i915_sched_engine *
+i915_sched_engine_create(unsigned int subclass)
+{
+       struct i915_sched_engine *sched_engine;
+
+       sched_engine = kzalloc(sizeof(*sched_engine), GFP_KERNEL);
+       if (!sched_engine)
+               return NULL;
+
+       kref_init(&sched_engine->ref);
+
+       sched_engine->queue = RB_ROOT_CACHED;
+       sched_engine->queue_priority_hint = INT_MIN;
+
+       INIT_LIST_HEAD(&sched_engine->requests);
+       INIT_LIST_HEAD(&sched_engine->hold);
+
+       spin_lock_init(&sched_engine->lock);
+       lockdep_set_subclass(&sched_engine->lock, subclass);
+
+       /*
+        * Due to an interesting quirk in lockdep's internal debug tracking,
+        * after setting a subclass we must ensure the lock is used. Otherwise,
+        * nr_unused_locks is incremented once too often.
+        */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       local_irq_disable();
+       lock_map_acquire(&sched_engine->lock.dep_map);
+       lock_map_release(&sched_engine->lock.dep_map);
+       local_irq_enable();
+#endif
+
+       return sched_engine;
+}
+
 static void i915_global_scheduler_shrink(void)
 {
        kmem_cache_shrink(global.slab_dependencies);
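
A backend would typically allocate its scheduling engine during engine setup and drop the reference on teardown. A sketch under the assumption of an i915_sched_engine_put() kref wrapper and the ENGINE_PHYSICAL lockdep subclass (both illustrative, not part of this diff):

static inline void
i915_sched_engine_put(struct i915_sched_engine *sched_engine)
{
	kref_put(&sched_engine->ref, i915_sched_engine_free);
}

static int example_engine_init(struct intel_engine_cs *engine)
{
	engine->sched_engine = i915_sched_engine_create(ENGINE_PHYSICAL);
	if (!engine->sched_engine)
		return -ENOMEM;

	/* Wire up the backend-specific kick, e.g. the execlists sketch above. */
	engine->sched_engine->kick_backend = kick_execlists;

	return 0;
}

static void example_engine_fini(struct intel_engine_cs *engine)
{
	i915_sched_engine_put(engine->sched_engine);
	engine->sched_engine = NULL;
}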