2 * Copyright © 2016 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #ifndef __I915_GEM_CONTEXT_H__
26 #define __I915_GEM_CONTEXT_H__
28 #include <linux/bitops.h>
29 #include <linux/list.h>
30 #include <linux/radix-tree.h>
33 #include "i915_scheduler.h"
40 struct drm_i915_private;
41 struct drm_i915_file_private;
47 #define DEFAULT_CONTEXT_HANDLE 0
/**
 * struct intel_context_ops - backend callbacks for per-engine context state
 * @unpin: drop the backend's hold on the context state; see
 *	intel_context_unpin(), which invokes this once the last pin is
 *	released
 * @destroy: free the backend resources associated with the context
 */
struct intel_context_ops {
	void (*unpin)(struct intel_context *ce);
	void (*destroy)(struct intel_context *ce);
};
57 * struct i915_gem_context - client state
59 * The struct i915_gem_context represents the combined view of the driver and
60 * logical hardware state for a particular client.
62 struct i915_gem_context {
63 /** i915: i915 device backpointer */
64 struct drm_i915_private *i915;
66 /** file_priv: owning file descriptor */
67 struct drm_i915_file_private *file_priv;
70 * @ppgtt: unique address space (GTT)
72 * In full-ppgtt mode, each context has its own address space ensuring
73 * complete seperation of one client from all others.
75 * In other modes, this is a NULL pointer with the expectation that
76 * the caller uses the shared global GTT.
78 struct i915_hw_ppgtt *ppgtt;
81 * @pid: process id of creator
83 * Note that who created the context may not be the principle user,
84 * as the context may be shared across a local socket. However,
85 * that should only affect the default context, all contexts created
86 * explicitly by the client are expected to be isolated.
91 * @name: arbitrary name
93 * A name is constructed for the context from the creator's process
94 * name, pid and user handle in order to uniquely identify the
95 * context in messages.
99 /** link: place with &drm_i915_private.context_list */
100 struct list_head link;
101 struct llist_node free_link;
104 * @ref: reference count
106 * A reference to a context is held by both the client who created it
107 * and on each request submitted to the hardware using the request
108 * (to ensure the hardware has access to the state until it has
109 * finished all pending writes). See i915_gem_context_get() and
110 * i915_gem_context_put() for access.
115 * @rcu: rcu_head for deferred freeing.
120 * @user_flags: small set of booleans controlled by the user
122 unsigned long user_flags;
123 #define UCONTEXT_NO_ZEROMAP 0
124 #define UCONTEXT_NO_ERROR_CAPTURE 1
125 #define UCONTEXT_BANNABLE 2
128 * @flags: small set of booleans
131 #define CONTEXT_BANNED 0
132 #define CONTEXT_CLOSED 1
133 #define CONTEXT_FORCE_SINGLE_SUBMISSION 2
136 * @hw_id: - unique identifier for the context
138 * The hardware needs to uniquely identify the context for a few
139 * functions like fault reporting, PASID, scheduling. The
140 * &drm_i915_private.context_hw_ida is used to assign a unqiue
141 * id for the lifetime of the context.
143 * @hw_id_pin_count: - number of times this context had been pinned
144 * for use (should be, at most, once per engine).
146 * @hw_id_link: - all contexts with an assigned id are tracked
147 * for possible repossession.
150 atomic_t hw_id_pin_count;
151 struct list_head hw_id_link;
154 * @user_handle: userspace identifier
156 * A unique per-file identifier is generated from
157 * &drm_i915_file_private.contexts.
161 struct i915_sched_attr sched;
163 /** engine: per-engine logical HW state */
164 struct intel_context {
165 struct i915_gem_context *gem_context;
166 struct intel_engine_cs *active;
167 struct i915_vma *state;
168 struct intel_ring *ring;
173 const struct intel_context_ops *ops;
174 } __engine[I915_NUM_ENGINES];
176 /** ring_size: size for allocating the per-engine ring buffer */
178 /** desc_template: invariant fields for the HW context descriptor */
181 /** guilty_count: How many times this context has caused a GPU hang. */
182 atomic_t guilty_count;
184 * @active_count: How many times this context was active during a GPU
185 * hang, but did not cause it.
187 atomic_t active_count;
189 #define CONTEXT_SCORE_GUILTY 10
190 #define CONTEXT_SCORE_BAN_THRESHOLD 40
191 /** ban_score: Accumulated score of all hangs caused by this context. */
194 /** remap_slice: Bitmask of cache lines that need remapping */
197 /** handles_vma: rbtree to look up our context specific obj/vma for
198 * the user handle. (user handles are per fd, but the binding is
199 * per vm, which may be one per context or shared with the global GTT)
201 struct radix_tree_root handles_vma;
203 /** handles_list: reverse list of all the rbtree entries in use for
204 * this context, which allows us to free all the allocations on
207 struct list_head handles_list;
210 static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
212 return test_bit(CONTEXT_CLOSED, &ctx->flags);
215 static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
217 GEM_BUG_ON(i915_gem_context_is_closed(ctx));
218 set_bit(CONTEXT_CLOSED, &ctx->flags);
221 static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
223 return test_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
226 static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
228 set_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
231 static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
233 clear_bit(UCONTEXT_NO_ERROR_CAPTURE, &ctx->user_flags);
236 static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
238 return test_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
241 static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
243 set_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
246 static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
248 clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
251 static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
253 return test_bit(CONTEXT_BANNED, &ctx->flags);
256 static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
258 set_bit(CONTEXT_BANNED, &ctx->flags);
261 static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
263 return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
266 static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
268 __set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
271 int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx);
272 static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
274 if (atomic_inc_not_zero(&ctx->hw_id_pin_count))
277 return __i915_gem_context_pin_hw_id(ctx);
280 static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
282 GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u);
283 atomic_dec(&ctx->hw_id_pin_count);
286 static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
288 return c->user_handle == DEFAULT_CONTEXT_HANDLE;
291 static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
293 return !ctx->file_priv;
296 static inline struct intel_context *
297 to_intel_context(struct i915_gem_context *ctx,
298 const struct intel_engine_cs *engine)
300 return &ctx->__engine[engine->id];
303 static inline struct intel_context *
304 intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
306 return engine->context_pin(engine, ctx);
309 static inline void __intel_context_pin(struct intel_context *ce)
311 GEM_BUG_ON(!ce->pin_count);
315 static inline void intel_context_unpin(struct intel_context *ce)
317 GEM_BUG_ON(!ce->pin_count);
321 GEM_BUG_ON(!ce->ops);
/* i915_gem_context.c */

/* Device-lifetime context bookkeeping. */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);

/* Per-client (per fd) open/close hooks. */
int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);

/* Context switching; see i915_gem_context.c for the exact semantics. */
int i915_switch_context(struct i915_request *rq);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);

/* kref release callback; see i915_gem_context_put(). */
void i915_gem_context_release(struct kref *ctx_ref);
/* GVT-g support: create a context on behalf of the mediator. */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);

/* ioctl entry points (uapi: drm/i915_drm.h). */
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);

/* Create a kernel-owned context at the given priority. */
struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);
355 static inline struct i915_gem_context *
356 i915_gem_context_get(struct i915_gem_context *ctx)
362 static inline void i915_gem_context_put(struct i915_gem_context *ctx)
364 kref_put(&ctx->ref, i915_gem_context_release);
367 #endif /* !__I915_GEM_CONTEXT_H__ */