/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */
#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__

#include <linux/average.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include "i915_active_types.h"
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"
20 #define CONTEXT_REDZONE POISON_INUSE
22 DECLARE_EWMA(runtime, 3, 8);
24 struct i915_gem_context;
25 struct i915_gem_ww_ctx;
27 struct intel_breadcrumbs;
/*
 * struct intel_context_ops - backend hooks for operating on an intel_context
 *
 * Invoked via ce->ops; a submission backend supplies an implementation of
 * each hook for the contexts it drives.
 */
struct intel_context_ops {
	/*
	 * NOTE(review): flags word for the COPS_* bits below — restored after
	 * it was lost in extraction; confirm against the upstream header.
	 */
	unsigned long flags;
#define COPS_HAS_INFLIGHT_BIT 0
#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)

	/* Allocate backend resources for the context. */
	int (*alloc)(struct intel_context *ce);

	/* Pin/unpin sequence: prepare, pin into the GPU, then tear down. */
	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
	int (*pin)(struct intel_context *ce, void *vaddr);
	void (*unpin)(struct intel_context *ce);
	void (*post_unpin)(struct intel_context *ce);

	/* Called as the context begins/ends being used by requests. */
	void (*enter)(struct intel_context *ce);
	void (*exit)(struct intel_context *ce);

	/* Reset the context state. */
	void (*reset)(struct intel_context *ce);
	/* Final release; called from the context's kref. */
	void (*destroy)(struct kref *kref);
};
50 struct intel_context {
52 * Note: Some fields may be accessed under RCU.
54 * Unless otherwise noted a field can safely be assumed to be protected
55 * by strong reference counting.
58 struct kref ref; /* no kref_get_unless_zero()! */
62 struct intel_engine_cs *engine;
63 struct intel_engine_cs *inflight;
64 #define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
65 #define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
66 #define intel_context_inflight(ce) \
67 __intel_context_inflight(READ_ONCE((ce)->inflight))
68 #define intel_context_inflight_count(ce) \
69 __intel_context_inflight_count(READ_ONCE((ce)->inflight))
71 struct i915_address_space *vm;
72 struct i915_gem_context __rcu *gem_context;
75 * @signal_lock protects the list of requests that need signaling,
76 * @signals. While there are any requests that need signaling,
77 * we add the context to the breadcrumbs worker, and remove it
78 * upon completion/cancellation of the last request.
80 struct list_head signal_link; /* Accessed under RCU */
81 struct list_head signals; /* Guarded by signal_lock */
82 spinlock_t signal_lock; /* protects signals, the list of requests */
84 struct i915_vma *state;
86 struct intel_ring *ring;
87 struct intel_timeline *timeline;
90 #define CONTEXT_BARRIER_BIT 0
91 #define CONTEXT_ALLOC_BIT 1
92 #define CONTEXT_INIT_BIT 2
93 #define CONTEXT_VALID_BIT 3
94 #define CONTEXT_CLOSED_BIT 4
95 #define CONTEXT_USE_SEMAPHORES 5
96 #define CONTEXT_BANNED 6
97 #define CONTEXT_FORCE_SINGLE_SUBMISSION 7
98 #define CONTEXT_NOPREEMPT 8
112 u32 tag; /* cookie passed to HW to track this context on submission */
114 /* Time on GPU as tracked by the hw. */
116 struct ewma_runtime avg;
119 I915_SELFTEST_DECLARE(u32 num_underflow);
120 I915_SELFTEST_DECLARE(u32 max_underflow);
123 unsigned int active_count; /* protected by timeline->mutex */
126 struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
129 * active: Active tracker for the rq activity (inc. external) on this
130 * intel_context object.
132 struct i915_active active;
134 const struct intel_context_ops *ops;
136 /** sseu: Control eu/slice partitioning */
137 struct intel_sseu sseu;
139 u8 wa_bb_page; /* if set, page num reserved for context workarounds */
#endif /* __INTEL_CONTEXT_TYPES__ */