2 * SPDX-License-Identifier: MIT
4 * Copyright © 2011-2012 Intel Corporation
8 * This file implements HW context support. On gen5+ a HW context consists of an
9 * opaque GPU object which is referenced at times of context saves and restores.
10 * With RC6 enabled, the context is also referenced as the GPU enters and exits
11 * RC6 (the GPU has its own internal power context, except on gen5). Though
12 * something like a context does exist for the media ring, the code only
13 * supports contexts for the render ring.
15 * In software, there is a distinction between contexts created by the user,
16 * and the default HW context. The default HW context is used by GPU clients
17 * that do not request setup of their own hardware context. The default
18 * context's state is never restored to help prevent programming errors. This
19 * would happen if a client ran and piggy-backed off another client's GPU state.
20 * The default context only exists to give the GPU some offset to load as the
21 * current context, which in turn invokes a save of the context we actually
22 * care about. In fact, the code could likely be constructed, albeit in a more
23 * complicated fashion, to never use the default context, though that limits
24 * the driver's ability to swap out and/or destroy other contexts.
26 * All other contexts are created at the request of a GPU client. These contexts
27 * store GPU state, and thus allow GPU clients to not re-emit state (and
28 * potentially query certain state) at any time. The kernel driver makes
29 * certain that the appropriate commands are inserted.
31 * The context life cycle is semi-complicated in that context BOs may live
32 * longer than the context itself because of the way the hardware and object
33 * tracking work. Below is a very crude representation of the state machine
34 * describing the context life.
35 *                                           refcount  pincount  active
36 * S0: initial state                             0         0        0
37 * S1: context created                           1         0        0
38 * S2: context is currently running              2         1        X
39 * S3: GPU referenced, but not current           2         0        1
40 * S4: context is current, but destroyed         1         1        0
41 * S5: like S3, but destroyed                    1         0        1
43 * The most common (but not all) transitions:
44 * S0->S1: client creates a context
45 * S1->S2: client submits execbuf with context
46 * S2->S3: other clients submit execbufs with their own contexts
47 * S3->S1: context object was retired
48 * S3->S2: client submits another execbuf
49 * S2->S4: context destroy called with current context
50 * S3->S5->S0: destroy path
51 * S4->S5->S0: destroy path on current context
53 * There are two confusing terms used above:
54 * The "current context" means the context which is currently running on the
55 * GPU. The GPU has loaded its state already and has stored away the gtt
56 * offset of the BO. The GPU is not actively referencing the data at this
57 * offset, but it will on the next context switch. The only way to avoid this
58 * is to do a GPU reset.
60 * An "active context' is one which was previously the "current context" and is
61 * on the active list waiting for the next context switch to occur. Until this
62 * happens, the object must remain at the same gtt offset. It is therefore
63 * possible to destroy a context, but it is still active.
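 *
 * For illustration only (simplified, no error handling), a client typically
 * drives these transitions through the uAPI roughly as follows:
 *
 *     struct drm_i915_gem_context_create create = {};
 *     ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);         S0->S1
 *
 *     execbuf.rsvd1 = create.ctx_id;
 *     ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);           S1->S2
 *
 *     struct drm_i915_gem_context_destroy destroy = { .ctx_id = create.ctx_id };
 *     ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);       S2->S4 (or S3->S5)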
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
70 #include <drm/drm_syncobj.h>
72 #include "gt/gen6_ppgtt.h"
73 #include "gt/intel_context.h"
74 #include "gt/intel_context_param.h"
75 #include "gt/intel_engine_heartbeat.h"
76 #include "gt/intel_engine_user.h"
77 #include "gt/intel_gpu_commands.h"
78 #include "gt/intel_ring.h"
80 #include "i915_gem_context.h"
81 #include "i915_trace.h"
82 #include "i915_user_extensions.h"
84 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
86 static struct kmem_cache *slab_luts;
88 struct i915_lut_handle *i915_lut_handle_alloc(void)
90 return kmem_cache_alloc(slab_luts, GFP_KERNEL);
93 void i915_lut_handle_free(struct i915_lut_handle *lut)
95 kmem_cache_free(slab_luts, lut);
98 static void lut_close(struct i915_gem_context *ctx)
100 struct radix_tree_iter iter;
103 mutex_lock(&ctx->lut_mutex);
105 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
106 struct i915_vma *vma = rcu_dereference_raw(*slot);
107 struct drm_i915_gem_object *obj = vma->obj;
108 struct i915_lut_handle *lut;
110 if (!kref_get_unless_zero(&obj->base.refcount))
113 spin_lock(&obj->lut_lock);
114 list_for_each_entry(lut, &obj->lut_list, obj_link) {
118 if (lut->handle != iter.index)
121 list_del(&lut->obj_link);
124 spin_unlock(&obj->lut_lock);
126 if (&lut->obj_link != &obj->lut_list) {
127 i915_lut_handle_free(lut);
128 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
130 i915_gem_object_put(obj);
133 i915_gem_object_put(obj);
136 mutex_unlock(&ctx->lut_mutex);
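/*
 * Resolve a user engine specifier to an intel_context: with LOOKUP_USER_INDEX
 * the caller passes an index into the context's user-defined engine map,
 * otherwise @ci names a { class, instance } pair resolved through the legacy
 * engine layout.
 */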
139 static struct intel_context *
140 lookup_user_engine(struct i915_gem_context *ctx,
142 const struct i915_engine_class_instance *ci)
143 #define LOOKUP_USER_INDEX BIT(0)
147 if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
148 return ERR_PTR(-EINVAL);
150 if (!i915_gem_context_user_engines(ctx)) {
151 struct intel_engine_cs *engine;
153 engine = intel_engine_lookup_user(ctx->i915,
155 ci->engine_instance);
157 return ERR_PTR(-EINVAL);
159 idx = engine->legacy_idx;
161 idx = ci->engine_instance;
164 return i915_gem_context_get_engine(ctx, idx);
167 static int validate_priority(struct drm_i915_private *i915,
168 const struct drm_i915_gem_context_param *args)
170 s64 priority = args->value;
175 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
178 if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
179 priority < I915_CONTEXT_MIN_USER_PRIORITY)
182 if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
183 !capable(CAP_SYS_NICE))
189 static void proto_context_close(struct i915_gem_proto_context *pc)
195 if (pc->user_engines) {
196 for (i = 0; i < pc->num_user_engines; i++)
197 kfree(pc->user_engines[i].siblings);
198 kfree(pc->user_engines);
203 static int proto_context_set_persistence(struct drm_i915_private *i915,
204 struct i915_gem_proto_context *pc,
209 * Only contexts that are short-lived [that will expire or be
210 * reset] are allowed to survive past termination. We require
211 * hangcheck to ensure that the persistent requests are healthy.
213 if (!i915->params.enable_hangcheck)
216 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
218 /* To cancel a context we use "preempt-to-idle" */
219 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
223 * If the cancel fails, we then need to reset, cleanly!
225 * If the per-engine reset fails, all hope is lost! We resort
226 * to a full GPU reset in that unlikely case, but realistically
227 * if the engine could not reset, the full reset does not fare
228 * much better. The damage has been done.
230 * However, if we cannot reset an engine by itself, we cannot
231 * cleanup a hanging persistent context without causing
232 * collateral damage, and we should not pretend we can by
233 * exposing the interface.
235 if (!intel_has_reset_engine(&i915->gt))
238 pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
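/*
 * A proto-context collects the creation-time parameters for a context (flags,
 * scheduling priority, engine map, VM, SSEU) before any i915_gem_context is
 * built. It is converted into a real context either at the end of the CREATE
 * ioctl or lazily via finalize_create_context_locked().
 */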
244 static struct i915_gem_proto_context *
245 proto_context_create(struct drm_i915_private *i915, unsigned int flags)
247 struct i915_gem_proto_context *pc, *err;
249 pc = kzalloc(sizeof(*pc), GFP_KERNEL);
251 return ERR_PTR(-ENOMEM);
253 pc->num_user_engines = -1;
254 pc->user_engines = NULL;
255 pc->user_flags = BIT(UCONTEXT_BANNABLE) |
256 BIT(UCONTEXT_RECOVERABLE);
257 if (i915->params.enable_hangcheck)
258 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
259 pc->sched.priority = I915_PRIORITY_NORMAL;
261 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
262 if (!HAS_EXECLISTS(i915)) {
263 err = ERR_PTR(-EINVAL);
266 pc->single_timeline = true;
272 proto_context_close(pc);
276 static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
277 struct i915_gem_proto_context *pc,
283 lockdep_assert_held(&fpriv->proto_context_lock);
285 ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
289 old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
290 if (xa_is_err(old)) {
291 xa_erase(&fpriv->context_xa, *id);
299 static int proto_context_register(struct drm_i915_file_private *fpriv,
300 struct i915_gem_proto_context *pc,
305 mutex_lock(&fpriv->proto_context_lock);
306 ret = proto_context_register_locked(fpriv, pc, id);
307 mutex_unlock(&fpriv->proto_context_lock);
312 static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
313 struct i915_gem_proto_context *pc,
314 const struct drm_i915_gem_context_param *args)
316 struct drm_i915_private *i915 = fpriv->dev_priv;
317 struct i915_address_space *vm;
322 if (!HAS_FULL_PPGTT(i915))
325 if (upper_32_bits(args->value))
328 vm = i915_gem_vm_lookup(fpriv, args->value);
339 struct set_proto_ctx_engines {
340 struct drm_i915_private *i915;
341 unsigned num_engines;
342 struct i915_gem_proto_engine *engines;
346 set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
349 struct i915_context_engines_load_balance __user *ext =
350 container_of_user(base, typeof(*ext), base);
351 const struct set_proto_ctx_engines *set = data;
352 struct drm_i915_private *i915 = set->i915;
353 struct intel_engine_cs **siblings;
354 u16 num_siblings, idx;
358 if (!HAS_EXECLISTS(i915))
361 if (get_user(idx, &ext->engine_index))
364 if (idx >= set->num_engines) {
365 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
366 idx, set->num_engines);
370 idx = array_index_nospec(idx, set->num_engines);
371 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
373 "Invalid placement[%d], already occupied\n", idx);
377 if (get_user(num_siblings, &ext->num_siblings))
380 err = check_user_mbz(&ext->flags);
384 err = check_user_mbz(&ext->mbz64);
388 if (num_siblings == 0)
391 siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
395 for (n = 0; n < num_siblings; n++) {
396 struct i915_engine_class_instance ci;
398 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
403 siblings[n] = intel_engine_lookup_user(i915,
408 "Invalid sibling[%d]: { class:%d, inst:%d }\n",
409 n, ci.engine_class, ci.engine_instance);
415 if (num_siblings == 1) {
416 set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
417 set->engines[idx].engine = siblings[0];
420 set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
421 set->engines[idx].num_siblings = num_siblings;
422 set->engines[idx].siblings = siblings;
434 set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
436 struct i915_context_engines_bond __user *ext =
437 container_of_user(base, typeof(*ext), base);
438 const struct set_proto_ctx_engines *set = data;
439 struct drm_i915_private *i915 = set->i915;
440 struct i915_engine_class_instance ci;
441 struct intel_engine_cs *master;
445 if (get_user(idx, &ext->virtual_index))
448 if (idx >= set->num_engines) {
450 "Invalid index for virtual engine: %d >= %d\n",
451 idx, set->num_engines);
455 idx = array_index_nospec(idx, set->num_engines);
456 if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
457 drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
461 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
463 "Bonding with virtual engines not allowed\n");
467 err = check_user_mbz(&ext->flags);
471 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
472 err = check_user_mbz(&ext->mbz64[n]);
477 if (copy_from_user(&ci, &ext->master, sizeof(ci)))
480 master = intel_engine_lookup_user(i915,
485 "Unrecognised master engine: { class:%u, instance:%u }\n",
486 ci.engine_class, ci.engine_instance);
490 if (intel_engine_uses_guc(master)) {
491 DRM_DEBUG("bonding extension not supported with GuC submission");
495 if (get_user(num_bonds, &ext->num_bonds))
498 for (n = 0; n < num_bonds; n++) {
499 struct intel_engine_cs *bond;
501 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
504 bond = intel_engine_lookup_user(i915,
509 "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
510 n, ci.engine_class, ci.engine_instance);
518 static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
519 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
520 [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
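/*
 * For illustration only (simplified): userspace supplies the engine map as an
 * I915_CONTEXT_PARAM_ENGINES blob, e.g.
 *
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
 *		.engines = {
 *			{ .engine_class = I915_ENGINE_CLASS_RENDER, .engine_instance = 0 },
 *			{ .engine_class = I915_ENGINE_CLASS_COPY, .engine_instance = 0 },
 *		},
 *	};
 *	struct drm_i915_gem_context_param p = {
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.value = (__u64)(uintptr_t)&engines,
 *		.size = sizeof(engines),
 *	};
 *
 * Load balancing and bonding are configured through the extension chain
 * dispatched via the table above.
 */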
523 static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
524 struct i915_gem_proto_context *pc,
525 const struct drm_i915_gem_context_param *args)
527 struct drm_i915_private *i915 = fpriv->dev_priv;
528 struct set_proto_ctx_engines set = { .i915 = i915 };
529 struct i915_context_param_engines __user *user =
530 u64_to_user_ptr(args->value);
535 if (pc->num_user_engines >= 0) {
536 drm_dbg(&i915->drm, "Cannot set engines twice");
540 if (args->size < sizeof(*user) ||
541 !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
542 drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
547 set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
548 /* RING_MASK has no shift so we can use it directly here */
549 if (set.num_engines > I915_EXEC_RING_MASK + 1)
552 set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
556 for (n = 0; n < set.num_engines; n++) {
557 struct i915_engine_class_instance ci;
558 struct intel_engine_cs *engine;
560 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
565 memset(&set.engines[n], 0, sizeof(set.engines[n]));
567 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
568 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
571 engine = intel_engine_lookup_user(i915,
576 "Invalid engine[%d]: { class:%d, instance:%d }\n",
577 n, ci.engine_class, ci.engine_instance);
582 set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
583 set.engines[n].engine = engine;
587 if (!get_user(extensions, &user->extensions))
588 err = i915_user_extensions(u64_to_user_ptr(extensions),
589 set_proto_ctx_engines_extensions,
590 ARRAY_SIZE(set_proto_ctx_engines_extensions),
597 pc->num_user_engines = set.num_engines;
598 pc->user_engines = set.engines;
603 static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
604 struct i915_gem_proto_context *pc,
605 struct drm_i915_gem_context_param *args)
607 struct drm_i915_private *i915 = fpriv->dev_priv;
608 struct drm_i915_gem_context_param_sseu user_sseu;
609 struct intel_sseu *sseu;
612 if (args->size < sizeof(user_sseu))
615 if (GRAPHICS_VER(i915) != 11)
618 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
625 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
628 if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
631 if (pc->num_user_engines >= 0) {
632 int idx = user_sseu.engine.engine_instance;
633 struct i915_gem_proto_engine *pe;
635 if (idx >= pc->num_user_engines)
638 pe = &pc->user_engines[idx];
640 /* Only render engine supports RPCS configuration. */
641 if (pe->engine->class != RENDER_CLASS)
646 /* Only render engine supports RPCS configuration. */
647 if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
650 /* There is only one render engine */
651 if (user_sseu.engine.engine_instance != 0)
654 sseu = &pc->legacy_rcs_sseu;
657 ret = i915_gem_user_to_context_sseu(&i915->gt, &user_sseu, sseu);
661 args->size = sizeof(user_sseu);
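/* Apply a single CONTEXT_PARAM to a not-yet-finalized proto-context. */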
666 static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
667 struct i915_gem_proto_context *pc,
668 struct drm_i915_gem_context_param *args)
672 switch (args->param) {
673 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
676 else if (args->value)
677 pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
679 pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
682 case I915_CONTEXT_PARAM_BANNABLE:
685 else if (!capable(CAP_SYS_ADMIN) && !args->value)
687 else if (args->value)
688 pc->user_flags |= BIT(UCONTEXT_BANNABLE);
690 pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
693 case I915_CONTEXT_PARAM_RECOVERABLE:
696 else if (args->value)
697 pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
699 pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
702 case I915_CONTEXT_PARAM_PRIORITY:
703 ret = validate_priority(fpriv->dev_priv, args);
705 pc->sched.priority = args->value;
708 case I915_CONTEXT_PARAM_SSEU:
709 ret = set_proto_ctx_sseu(fpriv, pc, args);
712 case I915_CONTEXT_PARAM_VM:
713 ret = set_proto_ctx_vm(fpriv, pc, args);
716 case I915_CONTEXT_PARAM_ENGINES:
717 ret = set_proto_ctx_engines(fpriv, pc, args);
720 case I915_CONTEXT_PARAM_PERSISTENCE:
723 ret = proto_context_set_persistence(fpriv->dev_priv, pc,
727 case I915_CONTEXT_PARAM_NO_ZEROMAP:
728 case I915_CONTEXT_PARAM_BAN_PERIOD:
729 case I915_CONTEXT_PARAM_RINGSIZE:
738 static struct i915_address_space *
739 context_get_vm_rcu(struct i915_gem_context *ctx)
741 GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
744 struct i915_address_space *vm;
747 * We do not allow downgrading from full-ppgtt [to a shared
748 * global gtt], so ctx->vm cannot become NULL.
750 vm = rcu_dereference(ctx->vm);
751 if (!kref_get_unless_zero(&vm->ref))
755 * This ppgtt may have been reallocated between
756 * the read and the kref, and reassigned to a third
757 * context. In order to avoid inadvertent sharing
758 * of this ppgtt with that third context (and not
759 * src), we have to confirm that we have the same
760 * ppgtt after passing through the strong memory
761 * barrier implied by a successful
762 * kref_get_unless_zero().
764 * Once we have acquired the current ppgtt of ctx,
765 * we no longer care if it is released from ctx, as
766 * it cannot be reallocated elsewhere.
769 if (vm == rcu_access_pointer(ctx->vm))
770 return rcu_pointer_handoff(vm);
776 static int intel_context_set_gem(struct intel_context *ce,
777 struct i915_gem_context *ctx,
778 struct intel_sseu sseu)
782 GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
783 RCU_INIT_POINTER(ce->gem_context, ctx);
785 ce->ring_size = SZ_16K;
787 if (rcu_access_pointer(ctx->vm)) {
788 struct i915_address_space *vm;
791 vm = context_get_vm_rcu(ctx); /* hmm */
798 if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
799 intel_engine_has_timeslices(ce->engine) &&
800 intel_engine_has_semaphores(ce->engine))
801 __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
803 if (IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) &&
804 ctx->i915->params.request_timeout_ms) {
805 unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
807 intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
810 /* A valid SSEU has no zero fields */
811 if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
812 ret = intel_context_reconfigure_sseu(ce, sseu);
817 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
820 if (!e->engines[count])
823 intel_context_put(e->engines[count]);
828 static void free_engines(struct i915_gem_engines *e)
830 __free_engines(e, e->num_engines);
833 static void free_engines_rcu(struct rcu_head *rcu)
835 struct i915_gem_engines *engines =
836 container_of(rcu, struct i915_gem_engines, rcu);
838 i915_sw_fence_fini(&engines->fence);
839 free_engines(engines);
842 static int __i915_sw_fence_call
843 engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
845 struct i915_gem_engines *engines =
846 container_of(fence, typeof(*engines), fence);
850 if (!list_empty(&engines->link)) {
851 struct i915_gem_context *ctx = engines->ctx;
854 spin_lock_irqsave(&ctx->stale.lock, flags);
855 list_del(&engines->link);
856 spin_unlock_irqrestore(&ctx->stale.lock, flags);
858 i915_gem_context_put(engines->ctx);
862 init_rcu_head(&engines->rcu);
863 call_rcu(&engines->rcu, free_engines_rcu);
870 static struct i915_gem_engines *alloc_engines(unsigned int count)
872 struct i915_gem_engines *e;
874 e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
878 i915_sw_fence_init(&e->fence, engines_notify);
882 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
883 struct intel_sseu rcs_sseu)
885 const struct intel_gt *gt = &ctx->i915->gt;
886 struct intel_engine_cs *engine;
887 struct i915_gem_engines *e, *err;
888 enum intel_engine_id id;
890 e = alloc_engines(I915_NUM_ENGINES);
892 return ERR_PTR(-ENOMEM);
894 for_each_engine(engine, gt, id) {
895 struct intel_context *ce;
896 struct intel_sseu sseu = {};
899 if (engine->legacy_idx == INVALID_ENGINE)
902 GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
903 GEM_BUG_ON(e->engines[engine->legacy_idx]);
905 ce = intel_context_create(engine);
911 e->engines[engine->legacy_idx] = ce;
912 e->num_engines = max(e->num_engines, engine->legacy_idx + 1);
914 if (engine->class == RENDER_CLASS)
917 ret = intel_context_set_gem(ce, ctx, sseu);
932 static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
933 unsigned int num_engines,
934 struct i915_gem_proto_engine *pe)
936 struct i915_gem_engines *e, *err;
939 e = alloc_engines(num_engines);
940 for (n = 0; n < num_engines; n++) {
941 struct intel_context *ce;
944 switch (pe[n].type) {
945 case I915_GEM_ENGINE_TYPE_PHYSICAL:
946 ce = intel_context_create(pe[n].engine);
949 case I915_GEM_ENGINE_TYPE_BALANCED:
950 ce = intel_engine_create_virtual(pe[n].siblings,
954 case I915_GEM_ENGINE_TYPE_INVALID:
956 GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
967 ret = intel_context_set_gem(ce, ctx, pe->sseu);
973 e->num_engines = num_engines;
982 void i915_gem_context_release(struct kref *ref)
984 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
986 trace_i915_context_free(ctx);
987 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
989 mutex_destroy(&ctx->engines_mutex);
990 mutex_destroy(&ctx->lut_mutex);
993 mutex_destroy(&ctx->mutex);
998 static inline struct i915_gem_engines *
999 __context_engines_static(const struct i915_gem_context *ctx)
1001 return rcu_dereference_protected(ctx->engines, true);
1004 static void __reset_context(struct i915_gem_context *ctx,
1005 struct intel_engine_cs *engine)
1007 intel_gt_handle_error(engine->gt, engine->mask, 0,
1008 "context closure in %s", ctx->name);
1011 static bool __cancel_engine(struct intel_engine_cs *engine)
1014 * Send a "high priority pulse" down the engine to cause the
1015 * current request to be momentarily preempted. (If it fails to
1016 * be preempted, it will be reset). As we have marked our context
1017 * as banned, any incomplete request, including any running, will
1018 * be skipped following the preemption.
1020 * If there is no hangchecking (one of the reasons why we try to
1021 * cancel the context) and no forced preemption, there may be no
1022 * means by which we reset the GPU and evict the persistent hog.
1023 * Ergo if we are unable to inject a preemptive pulse that can
1024 * kill the banned context, we fallback to doing a local reset
1027 return intel_engine_pulse(engine) == 0;
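/*
 * Find the engine (if any) on which this context still has a request in
 * flight, so that it can be preempted or reset when the context is banned.
 */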
1030 static struct intel_engine_cs *active_engine(struct intel_context *ce)
1032 struct intel_engine_cs *engine = NULL;
1033 struct i915_request *rq;
1035 if (intel_context_has_inflight(ce))
1036 return intel_context_inflight(ce);
1042 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
1043 * to the request to prevent it being transferred to a new timeline
1044 * (and onto a new timeline->requests list).
1047 list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
1050 /* timeline is already completed up to this point? */
1051 if (!i915_request_get_rcu(rq))
1054 /* Check with the backend if the request is inflight */
1056 if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
1057 found = i915_request_active_engine(rq, &engine);
1059 i915_request_put(rq);
1068 static void kill_engines(struct i915_gem_engines *engines, bool ban)
1070 struct i915_gem_engines_iter it;
1071 struct intel_context *ce;
1074 * Map the user's engine back to the actual engines; one virtual
1075 * engine will be mapped to multiple engines, and using ctx->engine[]
1076 * the same engine may have multiple instances in the user's map.
1077 * However, we only care about pending requests, so only include
1078 * engines on which there are incomplete requests.
1080 for_each_gem_engine(ce, engines, it) {
1081 struct intel_engine_cs *engine;
1083 if (ban && intel_context_ban(ce, NULL))
1087 * Check the current active state of this context; if we
1088 * are currently executing on the GPU we need to evict
1089 * ourselves. On the other hand, if we haven't yet been
1090 * submitted to the GPU or if everything is complete,
1091 * we have nothing to do.
1093 engine = active_engine(ce);
1095 /* First attempt to gracefully cancel the context */
1096 if (engine && !__cancel_engine(engine) && ban)
1098 * If we are unable to send a preemptive pulse to bump
1099 * the context from the GPU, we have to resort to a full
1100 * reset. We hope the collateral damage is worth it.
1102 __reset_context(engines->ctx, engine);
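/*
 * Called on context close: walk each stale engine list and, if the context is
 * not allowed to persist (or hangcheck is disabled), ban it and cancel any
 * outstanding work.
 */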
1106 static void kill_context(struct i915_gem_context *ctx)
1108 bool ban = (!i915_gem_context_is_persistent(ctx) ||
1109 !ctx->i915->params.enable_hangcheck);
1110 struct i915_gem_engines *pos, *next;
1112 spin_lock_irq(&ctx->stale.lock);
1113 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1114 list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
1115 if (!i915_sw_fence_await(&pos->fence)) {
1116 list_del_init(&pos->link);
1120 spin_unlock_irq(&ctx->stale.lock);
1122 kill_engines(pos, ban);
1124 spin_lock_irq(&ctx->stale.lock);
1125 GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
1126 list_safe_reset_next(pos, next, link);
1127 list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
1129 i915_sw_fence_complete(&pos->fence);
1131 spin_unlock_irq(&ctx->stale.lock);
1134 static void engines_idle_release(struct i915_gem_context *ctx,
1135 struct i915_gem_engines *engines)
1137 struct i915_gem_engines_iter it;
1138 struct intel_context *ce;
1140 INIT_LIST_HEAD(&engines->link);
1142 engines->ctx = i915_gem_context_get(ctx);
1144 for_each_gem_engine(ce, engines, it) {
1147 /* serialises with execbuf */
1148 set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
1149 if (!intel_context_pin_if_active(ce))
1152 /* Wait until context is finally scheduled out and retired */
1153 err = i915_sw_fence_await_active(&engines->fence,
1155 I915_ACTIVE_AWAIT_BARRIER);
1156 intel_context_unpin(ce);
1161 spin_lock_irq(&ctx->stale.lock);
1162 if (!i915_gem_context_is_closed(ctx))
1163 list_add_tail(&engines->link, &ctx->stale.engines);
1164 spin_unlock_irq(&ctx->stale.lock);
1167 if (list_empty(&engines->link)) /* raced, already closed */
1168 kill_engines(engines, true);
1170 i915_sw_fence_commit(&engines->fence);
1173 static void set_closed_name(struct i915_gem_context *ctx)
1177 /* Replace '[]' with '<>' to indicate closed in debug prints */
1179 s = strrchr(ctx->name, '[');
1185 s = strchr(s + 1, ']');
1190 static void context_close(struct i915_gem_context *ctx)
1192 struct i915_address_space *vm;
1194 /* Flush any concurrent set_engines() */
1195 mutex_lock(&ctx->engines_mutex);
1196 engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
1197 i915_gem_context_set_closed(ctx);
1198 mutex_unlock(&ctx->engines_mutex);
1200 mutex_lock(&ctx->mutex);
1202 set_closed_name(ctx);
1204 vm = i915_gem_context_vm(ctx);
1209 drm_syncobj_put(ctx->syncobj);
1211 ctx->file_priv = ERR_PTR(-EBADF);
1214 * The LUT uses the VMA as a backpointer to unref the object,
1215 * so we need to clear the LUT before we close all the VMA (inside
1220 spin_lock(&ctx->i915->gem.contexts.lock);
1221 list_del(&ctx->link);
1222 spin_unlock(&ctx->i915->gem.contexts.lock);
1224 mutex_unlock(&ctx->mutex);
1227 * If the user has disabled hangchecking, we cannot be sure that
1228 * the batches will ever complete after the context is closed,
1229 * keeping the context and all resources pinned forever. So in this
1230 * case we opt to forcibly kill off all remaining requests on
1235 i915_gem_context_put(ctx);
1238 static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
1240 if (i915_gem_context_is_persistent(ctx) == state)
1245 * Only contexts that are short-lived [that will expire or be
1246 * reset] are allowed to survive past termination. We require
1247 * hangcheck to ensure that the persistent requests are healthy.
1249 if (!ctx->i915->params.enable_hangcheck)
1252 i915_gem_context_set_persistence(ctx);
1254 /* To cancel a context we use "preempt-to-idle" */
1255 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
1259 * If the cancel fails, we then need to reset, cleanly!
1261 * If the per-engine reset fails, all hope is lost! We resort
1262 * to a full GPU reset in that unlikely case, but realistically
1263 * if the engine could not reset, the full reset does not fare
1264 * much better. The damage has been done.
1266 * However, if we cannot reset an engine by itself, we cannot
1267 * cleanup a hanging persistent context without causing
1268 * collateral damage, and we should not pretend we can by
1269 * exposing the interface.
1271 if (!intel_has_reset_engine(&ctx->i915->gt))
1274 i915_gem_context_clear_persistence(ctx);
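/*
 * Acquire a stable hold on ctx->engines: take an await on the engines fence,
 * then confirm the pointer has not been replaced in the meantime, dropping
 * the await and retrying on a mismatch.
 */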
1280 static inline struct i915_gem_engines *
1281 __context_engines_await(const struct i915_gem_context *ctx,
1284 struct i915_gem_engines *engines;
1288 engines = rcu_dereference(ctx->engines);
1289 GEM_BUG_ON(!engines);
1292 *user_engines = i915_gem_context_user_engines(ctx);
1294 /* successful await => strong mb */
1295 if (unlikely(!i915_sw_fence_await(&engines->fence)))
1298 if (likely(engines == rcu_access_pointer(ctx->engines)))
1301 i915_sw_fence_complete(&engines->fence);
1309 context_apply_all(struct i915_gem_context *ctx,
1310 void (*fn)(struct intel_context *ce, void *data),
1313 struct i915_gem_engines_iter it;
1314 struct i915_gem_engines *e;
1315 struct intel_context *ce;
1317 e = __context_engines_await(ctx, NULL);
1318 for_each_gem_engine(ce, e, it)
1320 i915_sw_fence_complete(&e->fence);
1323 static struct i915_gem_context *
1324 i915_gem_create_context(struct drm_i915_private *i915,
1325 const struct i915_gem_proto_context *pc)
1327 struct i915_gem_context *ctx;
1328 struct i915_address_space *vm = NULL;
1329 struct i915_gem_engines *e;
1333 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1335 return ERR_PTR(-ENOMEM);
1337 kref_init(&ctx->ref);
1339 ctx->sched = pc->sched;
1340 mutex_init(&ctx->mutex);
1341 INIT_LIST_HEAD(&ctx->link);
1343 spin_lock_init(&ctx->stale.lock);
1344 INIT_LIST_HEAD(&ctx->stale.engines);
1347 vm = i915_vm_get(pc->vm);
1348 } else if (HAS_FULL_PPGTT(i915)) {
1349 struct i915_ppgtt *ppgtt;
1351 ppgtt = i915_ppgtt_create(&i915->gt);
1352 if (IS_ERR(ppgtt)) {
1353 drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
1355 err = PTR_ERR(ppgtt);
1361 RCU_INIT_POINTER(ctx->vm, i915_vm_open(vm));
1363 /* i915_vm_open() takes a reference */
1367 mutex_init(&ctx->engines_mutex);
1368 if (pc->num_user_engines >= 0) {
1369 i915_gem_context_set_user_engines(ctx);
1370 e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
1372 i915_gem_context_clear_user_engines(ctx);
1373 e = default_engines(ctx, pc->legacy_rcs_sseu);
1379 RCU_INIT_POINTER(ctx->engines, e);
1381 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
1382 mutex_init(&ctx->lut_mutex);
1384 /* NB: Mark all slices as needing a remap so that when the context first
1385 * loads it will restore whatever remap state already exists. If there
1386 * is no remap info, it will be a NOP. */
1387 ctx->remap_slice = ALL_L3_SLICES(i915);
1389 ctx->user_flags = pc->user_flags;
1391 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
1392 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
1394 if (pc->single_timeline) {
1395 err = drm_syncobj_create(&ctx->syncobj,
1396 DRM_SYNCOBJ_CREATE_SIGNALED,
1402 trace_i915_context_create(ctx);
1410 i915_vm_close(ctx->vm);
1413 return ERR_PTR(err);
1416 static void init_contexts(struct i915_gem_contexts *gc)
1418 spin_lock_init(&gc->lock);
1419 INIT_LIST_HEAD(&gc->list);
1422 void i915_gem_init__contexts(struct drm_i915_private *i915)
1424 init_contexts(&i915->gem.contexts);
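/*
 * Publish a finalized context to userspace: record the owning pid and a debug
 * name, install the context in the client's context_xa under @id and add it
 * to the global contexts list.
 */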
1427 static void gem_context_register(struct i915_gem_context *ctx,
1428 struct drm_i915_file_private *fpriv,
1431 struct drm_i915_private *i915 = ctx->i915;
1434 ctx->file_priv = fpriv;
1436 ctx->pid = get_task_pid(current, PIDTYPE_PID);
1437 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
1438 current->comm, pid_nr(ctx->pid));
1440 /* And finally expose ourselves to userspace via the idr */
1441 old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
1444 spin_lock(&i915->gem.contexts.lock);
1445 list_add_tail(&ctx->link, &i915->gem.contexts.list);
1446 spin_unlock(&i915->gem.contexts.lock);
1449 int i915_gem_context_open(struct drm_i915_private *i915,
1450 struct drm_file *file)
1452 struct drm_i915_file_private *file_priv = file->driver_priv;
1453 struct i915_gem_proto_context *pc;
1454 struct i915_gem_context *ctx;
1457 mutex_init(&file_priv->proto_context_lock);
1458 xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC);
1460 /* 0 reserved for the default context */
1461 xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1);
1463 /* 0 reserved for invalid/unassigned ppgtt */
1464 xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
1466 pc = proto_context_create(i915, 0);
1472 ctx = i915_gem_create_context(i915, pc);
1473 proto_context_close(pc);
1479 gem_context_register(ctx, file_priv, 0);
1484 xa_destroy(&file_priv->vm_xa);
1485 xa_destroy(&file_priv->context_xa);
1486 xa_destroy(&file_priv->proto_context_xa);
1487 mutex_destroy(&file_priv->proto_context_lock);
1491 void i915_gem_context_close(struct drm_file *file)
1493 struct drm_i915_file_private *file_priv = file->driver_priv;
1494 struct i915_gem_proto_context *pc;
1495 struct i915_address_space *vm;
1496 struct i915_gem_context *ctx;
1499 xa_for_each(&file_priv->proto_context_xa, idx, pc)
1500 proto_context_close(pc);
1501 xa_destroy(&file_priv->proto_context_xa);
1502 mutex_destroy(&file_priv->proto_context_lock);
1504 xa_for_each(&file_priv->context_xa, idx, ctx)
1506 xa_destroy(&file_priv->context_xa);
1508 xa_for_each(&file_priv->vm_xa, idx, vm)
1510 xa_destroy(&file_priv->vm_xa);
1513 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
1514 struct drm_file *file)
1516 struct drm_i915_private *i915 = to_i915(dev);
1517 struct drm_i915_gem_vm_control *args = data;
1518 struct drm_i915_file_private *file_priv = file->driver_priv;
1519 struct i915_ppgtt *ppgtt;
1523 if (!HAS_FULL_PPGTT(i915))
1529 ppgtt = i915_ppgtt_create(&i915->gt);
1531 return PTR_ERR(ppgtt);
1533 if (args->extensions) {
1534 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
1541 err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
1542 xa_limit_32b, GFP_KERNEL);
1546 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1551 i915_vm_put(&ppgtt->vm);
1555 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
1556 struct drm_file *file)
1558 struct drm_i915_file_private *file_priv = file->driver_priv;
1559 struct drm_i915_gem_vm_control *args = data;
1560 struct i915_address_space *vm;
1565 if (args->extensions)
1568 vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1576 static int get_ppgtt(struct drm_i915_file_private *file_priv,
1577 struct i915_gem_context *ctx,
1578 struct drm_i915_gem_context_param *args)
1580 struct i915_address_space *vm;
1584 if (!rcu_access_pointer(ctx->vm))
1588 vm = context_get_vm_rcu(ctx);
1593 err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1599 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1609 i915_gem_user_to_context_sseu(struct intel_gt *gt,
1610 const struct drm_i915_gem_context_param_sseu *user,
1611 struct intel_sseu *context)
1613 const struct sseu_dev_info *device = >->info.sseu;
1614 struct drm_i915_private *i915 = gt->i915;
1616 /* No zeros in any field. */
1617 if (!user->slice_mask || !user->subslice_mask ||
1618 !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1622 if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1626 * Some future proofing on the types since the uAPI is wider than the
1627 * current internal implementation.
1629 if (overflows_type(user->slice_mask, context->slice_mask) ||
1630 overflows_type(user->subslice_mask, context->subslice_mask) ||
1631 overflows_type(user->min_eus_per_subslice,
1632 context->min_eus_per_subslice) ||
1633 overflows_type(user->max_eus_per_subslice,
1634 context->max_eus_per_subslice))
1637 /* Check validity against hardware. */
1638 if (user->slice_mask & ~device->slice_mask)
1641 if (user->subslice_mask & ~device->subslice_mask[0])
1644 if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1647 context->slice_mask = user->slice_mask;
1648 context->subslice_mask = user->subslice_mask;
1649 context->min_eus_per_subslice = user->min_eus_per_subslice;
1650 context->max_eus_per_subslice = user->max_eus_per_subslice;
1652 /* Part specific restrictions. */
1653 if (GRAPHICS_VER(i915) == 11) {
1654 unsigned int hw_s = hweight8(device->slice_mask);
1655 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1656 unsigned int req_s = hweight8(context->slice_mask);
1657 unsigned int req_ss = hweight8(context->subslice_mask);
1660 * Only full subslice enablement is possible if more than one
1661 * slice is turned on.
1663 if (req_s > 1 && req_ss != hw_ss_per_s)
1667 * If more than four (SScount bitfield limit) subslices are
1668 * requested then the number has to be even.
1670 if (req_ss > 4 && (req_ss & 1))
1674 * If only one slice is enabled and subslice count is below the
1675 * device full enablement, it must be at most half of all the
1676 * available subslices.
1678 if (req_s == 1 && req_ss < hw_ss_per_s &&
1679 req_ss > (hw_ss_per_s / 2))
1682 /* ABI restriction - VME use case only. */
1684 /* All slices or one slice only. */
1685 if (req_s != 1 && req_s != hw_s)
1689 * Half subslices or full enablement only when one slice is
1693 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1696 /* No EU configuration changes. */
1697 if ((user->min_eus_per_subslice !=
1698 device->max_eus_per_subslice) ||
1699 (user->max_eus_per_subslice !=
1700 device->max_eus_per_subslice))
1707 static int set_sseu(struct i915_gem_context *ctx,
1708 struct drm_i915_gem_context_param *args)
1710 struct drm_i915_private *i915 = ctx->i915;
1711 struct drm_i915_gem_context_param_sseu user_sseu;
1712 struct intel_context *ce;
1713 struct intel_sseu sseu;
1714 unsigned long lookup;
1717 if (args->size < sizeof(user_sseu))
1720 if (GRAPHICS_VER(i915) != 11)
1723 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1730 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1734 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1735 lookup |= LOOKUP_USER_INDEX;
1737 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1741 /* Only render engine supports RPCS configuration. */
1742 if (ce->engine->class != RENDER_CLASS) {
1747 ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
1751 ret = intel_context_reconfigure_sseu(ce, sseu);
1755 args->size = sizeof(user_sseu);
1758 intel_context_put(ce);
1763 set_persistence(struct i915_gem_context *ctx,
1764 const struct drm_i915_gem_context_param *args)
1769 return __context_set_persistence(ctx, args->value);
1772 static void __apply_priority(struct intel_context *ce, void *arg)
1774 struct i915_gem_context *ctx = arg;
1776 if (!intel_engine_has_timeslices(ce->engine))
1779 if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
1780 intel_engine_has_semaphores(ce->engine))
1781 intel_context_set_use_semaphores(ce);
1783 intel_context_clear_use_semaphores(ce);
1786 static int set_priority(struct i915_gem_context *ctx,
1787 const struct drm_i915_gem_context_param *args)
1791 err = validate_priority(ctx->i915, args);
1795 ctx->sched.priority = args->value;
1796 context_apply_all(ctx, __apply_priority, ctx);
1801 static int ctx_setparam(struct drm_i915_file_private *fpriv,
1802 struct i915_gem_context *ctx,
1803 struct drm_i915_gem_context_param *args)
1807 switch (args->param) {
1808 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1811 else if (args->value)
1812 i915_gem_context_set_no_error_capture(ctx);
1814 i915_gem_context_clear_no_error_capture(ctx);
1817 case I915_CONTEXT_PARAM_BANNABLE:
1820 else if (!capable(CAP_SYS_ADMIN) && !args->value)
1822 else if (args->value)
1823 i915_gem_context_set_bannable(ctx);
1825 i915_gem_context_clear_bannable(ctx);
1828 case I915_CONTEXT_PARAM_RECOVERABLE:
1831 else if (args->value)
1832 i915_gem_context_set_recoverable(ctx);
1834 i915_gem_context_clear_recoverable(ctx);
1837 case I915_CONTEXT_PARAM_PRIORITY:
1838 ret = set_priority(ctx, args);
1841 case I915_CONTEXT_PARAM_SSEU:
1842 ret = set_sseu(ctx, args);
1845 case I915_CONTEXT_PARAM_PERSISTENCE:
1846 ret = set_persistence(ctx, args);
1849 case I915_CONTEXT_PARAM_NO_ZEROMAP:
1850 case I915_CONTEXT_PARAM_BAN_PERIOD:
1851 case I915_CONTEXT_PARAM_RINGSIZE:
1852 case I915_CONTEXT_PARAM_VM:
1853 case I915_CONTEXT_PARAM_ENGINES:
1863 struct i915_gem_proto_context *pc;
1864 struct drm_i915_file_private *fpriv;
1867 static int create_setparam(struct i915_user_extension __user *ext, void *data)
1869 struct drm_i915_gem_context_create_ext_setparam local;
1870 const struct create_ext *arg = data;
1872 if (copy_from_user(&local, ext, sizeof(local)))
1875 if (local.param.ctx_id)
1878 return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
1881 static int invalid_ext(struct i915_user_extension __user *ext, void *data)
1886 static const i915_user_extension_fn create_extensions[] = {
1887 [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
1888 [I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
1891 static bool client_is_banned(struct drm_i915_file_private *file_priv)
1893 return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
1896 static inline struct i915_gem_context *
1897 __context_lookup(struct drm_i915_file_private *file_priv, u32 id)
1899 struct i915_gem_context *ctx;
1902 ctx = xa_load(&file_priv->context_xa, id);
1903 if (ctx && !kref_get_unless_zero(&ctx->ref))
1910 static struct i915_gem_context *
1911 finalize_create_context_locked(struct drm_i915_file_private *file_priv,
1912 struct i915_gem_proto_context *pc, u32 id)
1914 struct i915_gem_context *ctx;
1917 lockdep_assert_held(&file_priv->proto_context_lock);
1919 ctx = i915_gem_create_context(file_priv->dev_priv, pc);
1923 gem_context_register(ctx, file_priv, id);
1925 old = xa_erase(&file_priv->proto_context_xa, id);
1926 GEM_BUG_ON(old != pc);
1927 proto_context_close(pc);
1929 /* One for the xarray and one for the caller */
1930 return i915_gem_context_get(ctx);
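/*
 * Look up a context by id for this client. If the id still refers to a
 * proto-context, take the proto_context_lock and finalize it into a real
 * context first.
 */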
1933 struct i915_gem_context *
1934 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
1936 struct i915_gem_proto_context *pc;
1937 struct i915_gem_context *ctx;
1939 ctx = __context_lookup(file_priv, id);
1943 mutex_lock(&file_priv->proto_context_lock);
1944 /* Try one more time under the lock */
1945 ctx = __context_lookup(file_priv, id);
1947 pc = xa_load(&file_priv->proto_context_xa, id);
1949 ctx = ERR_PTR(-ENOENT);
1951 ctx = finalize_create_context_locked(file_priv, pc, id);
1953 mutex_unlock(&file_priv->proto_context_lock);
1958 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
1959 struct drm_file *file)
1961 struct drm_i915_private *i915 = to_i915(dev);
1962 struct drm_i915_gem_context_create_ext *args = data;
1963 struct create_ext ext_data;
1967 if (!DRIVER_CAPS(i915)->has_logical_contexts)
1970 if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
1973 ret = intel_gt_terminally_wedged(&i915->gt);
1977 ext_data.fpriv = file->driver_priv;
1978 if (client_is_banned(ext_data.fpriv)) {
1980 "client %s[%d] banned from creating ctx\n",
1981 current->comm, task_pid_nr(current));
1985 ext_data.pc = proto_context_create(i915, args->flags);
1986 if (IS_ERR(ext_data.pc))
1987 return PTR_ERR(ext_data.pc);
1989 if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
1990 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
1992 ARRAY_SIZE(create_extensions),
1998 if (GRAPHICS_VER(i915) > 12) {
1999 struct i915_gem_context *ctx;
2001 /* Get ourselves a context ID */
2002 ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
2003 xa_limit_32b, GFP_KERNEL);
2007 ctx = i915_gem_create_context(i915, ext_data.pc);
2013 proto_context_close(ext_data.pc);
2014 gem_context_register(ctx, ext_data.fpriv, id);
2016 ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
2022 drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
2027 proto_context_close(ext_data.pc);
2031 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2032 struct drm_file *file)
2034 struct drm_i915_gem_context_destroy *args = data;
2035 struct drm_i915_file_private *file_priv = file->driver_priv;
2036 struct i915_gem_proto_context *pc;
2037 struct i915_gem_context *ctx;
2045 /* We need to hold the proto-context lock here to prevent races
2046 * with finalize_create_context_locked().
2048 mutex_lock(&file_priv->proto_context_lock);
2049 ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2050 pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
2051 mutex_unlock(&file_priv->proto_context_lock);
2055 GEM_WARN_ON(ctx && pc);
2058 proto_context_close(pc);
2066 static int get_sseu(struct i915_gem_context *ctx,
2067 struct drm_i915_gem_context_param *args)
2069 struct drm_i915_gem_context_param_sseu user_sseu;
2070 struct intel_context *ce;
2071 unsigned long lookup;
2074 if (args->size == 0)
2076 else if (args->size < sizeof(user_sseu))
2079 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2086 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2090 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2091 lookup |= LOOKUP_USER_INDEX;
2093 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2097 err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2099 intel_context_put(ce);
2103 user_sseu.slice_mask = ce->sseu.slice_mask;
2104 user_sseu.subslice_mask = ce->sseu.subslice_mask;
2105 user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2106 user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2108 intel_context_unlock_pinned(ce);
2109 intel_context_put(ce);
2111 if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2116 args->size = sizeof(user_sseu);
2121 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2122 struct drm_file *file)
2124 struct drm_i915_file_private *file_priv = file->driver_priv;
2125 struct drm_i915_gem_context_param *args = data;
2126 struct i915_gem_context *ctx;
2129 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2131 return PTR_ERR(ctx);
2133 switch (args->param) {
2134 case I915_CONTEXT_PARAM_GTT_SIZE:
2137 if (rcu_access_pointer(ctx->vm))
2138 args->value = rcu_dereference(ctx->vm)->total;
2140 args->value = to_i915(dev)->ggtt.vm.total;
2144 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2146 args->value = i915_gem_context_no_error_capture(ctx);
2149 case I915_CONTEXT_PARAM_BANNABLE:
2151 args->value = i915_gem_context_is_bannable(ctx);
2154 case I915_CONTEXT_PARAM_RECOVERABLE:
2156 args->value = i915_gem_context_is_recoverable(ctx);
2159 case I915_CONTEXT_PARAM_PRIORITY:
2161 args->value = ctx->sched.priority;
2164 case I915_CONTEXT_PARAM_SSEU:
2165 ret = get_sseu(ctx, args);
2168 case I915_CONTEXT_PARAM_VM:
2169 ret = get_ppgtt(file_priv, ctx, args);
2172 case I915_CONTEXT_PARAM_PERSISTENCE:
2174 args->value = i915_gem_context_is_persistent(ctx);
2177 case I915_CONTEXT_PARAM_NO_ZEROMAP:
2178 case I915_CONTEXT_PARAM_BAN_PERIOD:
2179 case I915_CONTEXT_PARAM_ENGINES:
2180 case I915_CONTEXT_PARAM_RINGSIZE:
2186 i915_gem_context_put(ctx);
2190 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2191 struct drm_file *file)
2193 struct drm_i915_file_private *file_priv = file->driver_priv;
2194 struct drm_i915_gem_context_param *args = data;
2195 struct i915_gem_proto_context *pc;
2196 struct i915_gem_context *ctx;
2199 mutex_lock(&file_priv->proto_context_lock);
2200 ctx = __context_lookup(file_priv, args->ctx_id);
2202 pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
2204 /* Contexts should be finalized inside
2205 * GEM_CONTEXT_CREATE starting with graphics
2208 WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
2209 ret = set_proto_ctx_param(file_priv, pc, args);
2214 mutex_unlock(&file_priv->proto_context_lock);
2217 ret = ctx_setparam(file_priv, ctx, args);
2218 i915_gem_context_put(ctx);
2224 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2225 void *data, struct drm_file *file)
2227 struct drm_i915_private *i915 = to_i915(dev);
2228 struct drm_i915_reset_stats *args = data;
2229 struct i915_gem_context *ctx;
2231 if (args->flags || args->pad)
2234 ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
2236 return PTR_ERR(ctx);
2239 * We opt for unserialised reads here. This may result in tearing
2240 * in the extremely unlikely event of a GPU hang on this context
2241 * as we are querying them. If we need that extra layer of protection,
2242 * we should wrap the hangstats with a seqlock.
2245 if (capable(CAP_SYS_ADMIN))
2246 args->reset_count = i915_reset_count(&i915->gpu_error);
2248 args->reset_count = 0;
2250 args->batch_active = atomic_read(&ctx->guilty_count);
2251 args->batch_pending = atomic_read(&ctx->active_count);
2253 i915_gem_context_put(ctx);
2257 /* GEM context-engines iterator: for_each_gem_engine() */
2258 struct intel_context *
2259 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2261 const struct i915_gem_engines *e = it->engines;
2262 struct intel_context *ctx;
2268 if (it->idx >= e->num_engines)
2271 ctx = e->engines[it->idx++];
2277 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2278 #include "selftests/mock_context.c"
2279 #include "selftests/i915_gem_context.c"
2282 void i915_gem_context_module_exit(void)
2284 kmem_cache_destroy(slab_luts);
2287 int __init i915_gem_context_module_init(void)
2289 slab_luts = KMEM_CACHE(i915_lut_handle, 0);