drm/i915/guc: Remove intel_context:number_committed_requests counter
author Alan Previn <alan.previn.teres.alexis@intel.com>
Thu, 6 Oct 2022 22:51:21 +0000 (15:51 -0700)
committer John Harrison <John.C.Harrison@Intel.com>
Thu, 27 Oct 2022 00:29:47 +0000 (17:29 -0700)
With the introduction of the delayed disable-sched behavior,
we use the GuC's xarray of valid guc-ids to identify whether
new requests have been added to a context while that context
is being checked for closure.
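
As a rough illustration, the xarray lookup can stand in for the
removed counter when deciding whether a closing context still has
(or may yet pick up) work. This is a minimal sketch, not the
driver's code: the helper name is hypothetical, while xa_load()
and guc->context_lookup (the GuC's xarray of valid guc-ids) are
the stock API and the existing driver field.

#include <linux/xarray.h>
/* assumes the i915 types from gt/uc/intel_guc.h */

/*
 * Hypothetical helper: a guc-id still present in the GuC's lookup
 * xarray means the context is registered and may receive requests,
 * so a closure check must treat it as potentially busy.
 */
static bool guc_id_still_mapped(struct intel_guc *guc, u32 id)
{
        return xa_load(&guc->context_lookup, id) != NULL;
}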

Additionally, that prior change closes the race where a new
incoming request fails to cancel the pending
delayed disable-sched worker.
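
The shape of that race closure, again as an illustrative sketch:
cancel_delayed_work() is the stock kernel API and the
sched_disable_delay_work field follows the delayed disable-sched
patch, but the surrounding code here is hypothetical.

#include <linux/workqueue.h>

/*
 * A new incoming request tries to cancel the pending worker. A
 * false return means the worker has already started; both paths
 * take ce->guc_state.lock and consult the guc-id xarray, so the
 * worker sees the still-valid mapping and backs off instead of
 * disabling scheduling on a context that just received work.
 */
if (!cancel_delayed_work(&ce->guc_state.sched_disable_delay_work)) {
        /*
         * The running worker re-checks guc_id_still_mapped() under
         * ce->guc_state.lock before issuing the disable.
         */
}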

With these two complementary checks, there is no remaining use for
intel_context:guc_state:number_committed_requests.

Signed-off-by: Alan Previn <alan.previn.teres.alexis@intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221006225121.826257-3-alan.previn.teres.alexis@intel.com
drivers/gpu/drm/i915/gt/intel_context_types.h
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c

drivers/gpu/drm/i915/gt/intel_context_types.h
index 6a49fa7..e36670f 100644
@@ -199,8 +199,6 @@ struct intel_context {
                 * context's submissions is complete.
                 */
                struct i915_sw_fence blocked;
-               /** @number_committed_requests: number of committed requests */
-               int number_committed_requests;
                /** @requests: list of active requests on this context */
                struct list_head requests;
                /** @prio: the context's current guc priority */
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index e8c934f..4ccb29f 100644
@@ -370,25 +370,6 @@ static inline void decr_context_blocked(struct intel_context *ce)
        ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
 }
 
-static inline bool context_has_committed_requests(struct intel_context *ce)
-{
-       return !!ce->guc_state.number_committed_requests;
-}
-
-static inline void incr_context_committed_requests(struct intel_context *ce)
-{
-       lockdep_assert_held(&ce->guc_state.lock);
-       ++ce->guc_state.number_committed_requests;
-       GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
-}
-
-static inline void decr_context_committed_requests(struct intel_context *ce)
-{
-       lockdep_assert_held(&ce->guc_state.lock);
-       --ce->guc_state.number_committed_requests;
-       GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
-}
-
 static struct intel_context *
 request_to_scheduling_context(struct i915_request *rq)
 {
@@ -3180,7 +3161,6 @@ static void __guc_context_destroy(struct intel_context *ce)
                   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
                   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
                   ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
-       GEM_BUG_ON(ce->guc_state.number_committed_requests);
 
        lrc_fini(ce);
        intel_context_fini(ce);
@@ -3449,8 +3429,6 @@ static void remove_from_context(struct i915_request *rq)
 
        guc_prio_fini(rq, ce);
 
-       decr_context_committed_requests(ce);
-
        spin_unlock_irq(&ce->guc_state.lock);
 
        atomic_dec(&ce->guc_id.ref);
@@ -3659,7 +3637,6 @@ out:
 
                list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
        }
-       incr_context_committed_requests(ce);
        spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
        return 0;