2 * SPDX-License-Identifier: MIT
4 * Copyright © 2011-2012 Intel Corporation
8 * This file implements HW context support. On gen5+ a HW context consists of an
9 * opaque GPU object which is referenced at times of context saves and restores.
10 * With RC6 enabled, the context is also referenced as the GPU enters and exits
11 * RC6 (the GPU has its own internal power context, except on gen5). Though
12 * something like a context does exist for the media ring, the code only
13 * supports contexts for the render ring.
15 * In software, there is a distinction between contexts created by the user,
16 * and the default HW context. The default HW context is used by GPU clients
17 * that do not request setup of their own hardware context. The default
18 * context's state is never restored to help prevent programming errors. This
19 * would happen if a client ran and piggy-backed off another client's GPU state.
20 * The default context only exists to give the GPU some offset to load as the
21 * current context in order to invoke a save of the context we actually care about. In fact, the
22 * code could likely be constructed, albeit in a more complicated fashion, to
23 * never use the default context, though that limits the driver's ability to
24 * swap out and/or destroy other contexts.
26 * All other contexts are created as a request by the GPU client. These contexts
27 * store GPU state, and thus allow GPU clients to not re-emit state (and
28 * potentially query certain state) at any time. The kernel driver makes
29 * certain that the appropriate commands are inserted.
31 * The context life cycle is semi-complicated in that context BOs may live
32 * longer than the context itself because of the way the hardware and object
33 * tracking work. Below is a very crude representation of the state machine
34 * describing the context life.
35 * refcount pincount active
36 * S0: initial state 0 0 0
37 * S1: context created 1 0 0
38 * S2: context is currently running 2 1 X
39 * S3: GPU referenced, but not current 2 0 1
40 * S4: context is current, but destroyed 1 1 0
41 * S5: like S3, but destroyed 1 0 1
43 * The most common (but not all) transitions:
44 * S0->S1: client creates a context
45 * S1->S2: client submits execbuf with context
46 * S2->S3: another client submits an execbuf with a different context
47 * S3->S1: context object was retired
48 * S3->S2: client submits another execbuf
49 * S2->S4: context destroy called with current context
50 * S3->S5->S0: destroy path
51 * S4->S5->S0: destroy path on current context
53 * There are two confusing terms used above:
54 * The "current context" means the context which is currently running on the
55 * GPU. The GPU has loaded its state already and has stored away the gtt
56 * offset of the BO. The GPU is not actively referencing the data at this
57 * offset, but it will on the next context switch. The only way to avoid this
58 * is to do a GPU reset.
60 * An "active context" is one which was previously the "current context" and is
61 * on the active list waiting for the next context switch to occur. Until this
62 * happens, the object must remain at the same gtt offset. It is therefore
63 * possible to destroy a context while it is still active.
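 *
 * Illustrative userspace sketch (an addition, not part of the original
 * comment): the S0->S1 transition and the destroy paths above are driven by
 * the context create/destroy ioctls. This assumes a libdrm environment with
 * an already open render-node fd; error handling is elided, and id 0 doubles
 * as an error value because it is reserved for the default context.
 *
 *	#include <xf86drm.h>
 *	#include <drm/i915_drm.h>
 *
 *	static __u32 create_ctx(int fd)
 *	{
 *		struct drm_i915_gem_context_create_ext arg = {};
 *
 *		if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &arg))
 *			return 0;
 *		return arg.ctx_id;
 *	}
 *
 *	static void destroy_ctx(int fd, __u32 ctx_id)
 *	{
 *		struct drm_i915_gem_context_destroy arg = { .ctx_id = ctx_id };
 *
 *		drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &arg);
 *	}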
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
70 #include <drm/drm_cache.h>
71 #include <drm/drm_syncobj.h>
73 #include "gt/gen6_ppgtt.h"
74 #include "gt/intel_context.h"
75 #include "gt/intel_context_param.h"
76 #include "gt/intel_engine_heartbeat.h"
77 #include "gt/intel_engine_user.h"
78 #include "gt/intel_gpu_commands.h"
79 #include "gt/intel_ring.h"
81 #include "pxp/intel_pxp.h"
83 #include "i915_file_private.h"
84 #include "i915_gem_context.h"
85 #include "i915_trace.h"
86 #include "i915_user_extensions.h"
88 #define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
90 static struct kmem_cache *slab_luts;
92 struct i915_lut_handle *i915_lut_handle_alloc(void)
94 return kmem_cache_alloc(slab_luts, GFP_KERNEL);
97 void i915_lut_handle_free(struct i915_lut_handle *lut)
99 return kmem_cache_free(slab_luts, lut);
102 static void lut_close(struct i915_gem_context *ctx)
104 struct radix_tree_iter iter;
107 mutex_lock(&ctx->lut_mutex);
109 radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
110 struct i915_vma *vma = rcu_dereference_raw(*slot);
111 struct drm_i915_gem_object *obj = vma->obj;
112 struct i915_lut_handle *lut;
114 if (!kref_get_unless_zero(&obj->base.refcount))
117 spin_lock(&obj->lut_lock);
118 list_for_each_entry(lut, &obj->lut_list, obj_link) {
122 if (lut->handle != iter.index)
125 list_del(&lut->obj_link);
128 spin_unlock(&obj->lut_lock);
130 if (&lut->obj_link != &obj->lut_list) {
131 i915_lut_handle_free(lut);
132 radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
134 i915_gem_object_put(obj);
137 i915_gem_object_put(obj);
140 mutex_unlock(&ctx->lut_mutex);
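/*
 * Map a user-visible engine description onto an intel_context in the
 * context's engine array: either an index into the user-supplied engine map
 * (LOOKUP_USER_INDEX) or a legacy class/instance pair, depending on whether
 * the context was created with user engines.
 */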
143 static struct intel_context *
144 lookup_user_engine(struct i915_gem_context *ctx,
146 const struct i915_engine_class_instance *ci)
147 #define LOOKUP_USER_INDEX BIT(0)
151 if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
152 return ERR_PTR(-EINVAL);
154 if (!i915_gem_context_user_engines(ctx)) {
155 struct intel_engine_cs *engine;
157 engine = intel_engine_lookup_user(ctx->i915,
159 ci->engine_instance);
161 return ERR_PTR(-EINVAL);
163 idx = engine->legacy_idx;
165 idx = ci->engine_instance;
168 return i915_gem_context_get_engine(ctx, idx);
171 static int validate_priority(struct drm_i915_private *i915,
172 const struct drm_i915_gem_context_param *args)
174 s64 priority = args->value;
179 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
182 if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
183 priority < I915_CONTEXT_MIN_USER_PRIORITY)
186 if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
187 !capable(CAP_SYS_NICE))
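/*
 * Tear down a proto-context: drop the PXP runtime-pm wakeref if one was
 * taken and free any user-supplied engine map along with the proto-context
 * itself.
 */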
193 static void proto_context_close(struct drm_i915_private *i915,
194 struct i915_gem_proto_context *pc)
199 intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
202 if (pc->user_engines) {
203 for (i = 0; i < pc->num_user_engines; i++)
204 kfree(pc->user_engines[i].siblings);
205 kfree(pc->user_engines);
210 static int proto_context_set_persistence(struct drm_i915_private *i915,
211 struct i915_gem_proto_context *pc,
216 * Only contexts that are short-lived [that will expire or be
217 * reset] are allowed to survive past termination. We require
218 * hangcheck to ensure that the persistent requests are healthy.
220 if (!i915->params.enable_hangcheck)
223 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
225 /* To cancel a context we use "preempt-to-idle" */
226 if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
230 * If the cancel fails, we then need to reset, cleanly!
232 * If the per-engine reset fails, all hope is lost! We resort
233 * to a full GPU reset in that unlikely case, but realistically
234 * if the engine could not reset, the full reset does not fare
235 * much better. The damage has been done.
237 * However, if we cannot reset an engine by itself, we cannot
238 * clean up a hanging persistent context without causing
239 * collateral damage, and we should not pretend we can by
240 * exposing the interface.
242 if (!intel_has_reset_engine(to_gt(i915)))
245 pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
251 static int proto_context_set_protected(struct drm_i915_private *i915,
252 struct i915_gem_proto_context *pc,
258 pc->uses_protected_content = false;
259 } else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
261 } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
262 !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
265 pc->uses_protected_content = true;
268 * protected context usage requires the PXP session to be up,
269 * which in turn requires the device to be active.
271 pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
273 if (!intel_pxp_is_active(&to_gt(i915)->pxp))
274 ret = intel_pxp_start(&to_gt(i915)->pxp);
280 static struct i915_gem_proto_context *
281 proto_context_create(struct drm_i915_private *i915, unsigned int flags)
283 struct i915_gem_proto_context *pc, *err;
285 pc = kzalloc(sizeof(*pc), GFP_KERNEL);
287 return ERR_PTR(-ENOMEM);
289 pc->num_user_engines = -1;
290 pc->user_engines = NULL;
291 pc->user_flags = BIT(UCONTEXT_BANNABLE) |
292 BIT(UCONTEXT_RECOVERABLE);
293 if (i915->params.enable_hangcheck)
294 pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
295 pc->sched.priority = I915_PRIORITY_NORMAL;
297 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
298 if (!HAS_EXECLISTS(i915)) {
299 err = ERR_PTR(-EINVAL);
302 pc->single_timeline = true;
308 proto_context_close(i915, pc);
312 static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
313 struct i915_gem_proto_context *pc,
319 lockdep_assert_held(&fpriv->proto_context_lock);
321 ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
325 old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
326 if (xa_is_err(old)) {
327 xa_erase(&fpriv->context_xa, *id);
335 static int proto_context_register(struct drm_i915_file_private *fpriv,
336 struct i915_gem_proto_context *pc,
341 mutex_lock(&fpriv->proto_context_lock);
342 ret = proto_context_register_locked(fpriv, pc, id);
343 mutex_unlock(&fpriv->proto_context_lock);
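/* Translate a user-visible VM id into the address space stored in vm_xa. */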
348 static struct i915_address_space *
349 i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
351 struct i915_address_space *vm;
353 xa_lock(&file_priv->vm_xa);
354 vm = xa_load(&file_priv->vm_xa, id);
357 xa_unlock(&file_priv->vm_xa);
362 static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
363 struct i915_gem_proto_context *pc,
364 const struct drm_i915_gem_context_param *args)
366 struct drm_i915_private *i915 = fpriv->dev_priv;
367 struct i915_address_space *vm;
372 if (!HAS_FULL_PPGTT(i915))
375 if (upper_32_bits(args->value))
378 vm = i915_gem_vm_lookup(fpriv, args->value);
389 struct set_proto_ctx_engines {
390 struct drm_i915_private *i915;
391 unsigned num_engines;
392 struct i915_gem_proto_engine *engines;
396 set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
399 struct i915_context_engines_load_balance __user *ext =
400 container_of_user(base, typeof(*ext), base);
401 const struct set_proto_ctx_engines *set = data;
402 struct drm_i915_private *i915 = set->i915;
403 struct intel_engine_cs **siblings;
404 u16 num_siblings, idx;
408 if (!HAS_EXECLISTS(i915))
411 if (get_user(idx, &ext->engine_index))
414 if (idx >= set->num_engines) {
415 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
416 idx, set->num_engines);
420 idx = array_index_nospec(idx, set->num_engines);
421 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
423 "Invalid placement[%d], already occupied\n", idx);
427 if (get_user(num_siblings, &ext->num_siblings))
430 err = check_user_mbz(&ext->flags);
434 err = check_user_mbz(&ext->mbz64);
438 if (num_siblings == 0)
441 siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
445 for (n = 0; n < num_siblings; n++) {
446 struct i915_engine_class_instance ci;
448 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
453 siblings[n] = intel_engine_lookup_user(i915,
458 "Invalid sibling[%d]: { class:%d, inst:%d }\n",
459 n, ci.engine_class, ci.engine_instance);
465 if (num_siblings == 1) {
466 set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
467 set->engines[idx].engine = siblings[0];
470 set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
471 set->engines[idx].num_siblings = num_siblings;
472 set->engines[idx].siblings = siblings;
484 set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
486 struct i915_context_engines_bond __user *ext =
487 container_of_user(base, typeof(*ext), base);
488 const struct set_proto_ctx_engines *set = data;
489 struct drm_i915_private *i915 = set->i915;
490 struct i915_engine_class_instance ci;
491 struct intel_engine_cs *master;
495 if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
496 !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
498 "Bonding not supported on this platform\n");
502 if (get_user(idx, &ext->virtual_index))
505 if (idx >= set->num_engines) {
507 "Invalid index for virtual engine: %d >= %d\n",
508 idx, set->num_engines);
512 idx = array_index_nospec(idx, set->num_engines);
513 if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
514 drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
518 if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
520 "Bonding with virtual engines not allowed\n");
524 err = check_user_mbz(&ext->flags);
528 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
529 err = check_user_mbz(&ext->mbz64[n]);
534 if (copy_from_user(&ci, &ext->master, sizeof(ci)))
537 master = intel_engine_lookup_user(i915,
542 "Unrecognised master engine: { class:%u, instance:%u }\n",
543 ci.engine_class, ci.engine_instance);
547 if (intel_engine_uses_guc(master)) {
548 DRM_DEBUG("bonding extension not supported with GuC submission");
552 if (get_user(num_bonds, &ext->num_bonds))
555 for (n = 0; n < num_bonds; n++) {
556 struct intel_engine_cs *bond;
558 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
561 bond = intel_engine_lookup_user(i915,
566 "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
567 n, ci.engine_class, ci.engine_instance);
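/*
 * The parallel-submit extension describes a width x num_siblings array of
 * engines laid out row-major: sibling j of slot i is read from
 * ext->engines[i * num_siblings + j] below.
 */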
576 set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
579 struct i915_context_engines_parallel_submit __user *ext =
580 container_of_user(base, typeof(*ext), base);
581 const struct set_proto_ctx_engines *set = data;
582 struct drm_i915_private *i915 = set->i915;
583 struct i915_engine_class_instance prev_engine;
585 int err = 0, n, i, j;
586 u16 slot, width, num_siblings;
587 struct intel_engine_cs **siblings = NULL;
588 intel_engine_mask_t prev_mask;
590 if (get_user(slot, &ext->engine_index))
593 if (get_user(width, &ext->width))
596 if (get_user(num_siblings, &ext->num_siblings))
599 if (!intel_uc_uses_guc_submission(&to_gt(i915)->uc) &&
601 drm_dbg(&i915->drm, "Only 1 sibling (%d) supported in non-GuC mode\n",
606 if (slot >= set->num_engines) {
607 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
608 slot, set->num_engines);
612 if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
614 "Invalid placement[%d], already occupied\n", slot);
618 if (get_user(flags, &ext->flags))
622 drm_dbg(&i915->drm, "Unknown flags 0x%02llx", flags);
626 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
627 err = check_user_mbz(&ext->mbz64[n]);
633 drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
637 if (num_siblings < 1) {
638 drm_dbg(&i915->drm, "Number siblings (%d) < 1\n",
643 siblings = kmalloc_array(num_siblings * width,
649 /* Create contexts / engines */
650 for (i = 0; i < width; ++i) {
651 intel_engine_mask_t current_mask = 0;
653 for (j = 0; j < num_siblings; ++j) {
654 struct i915_engine_class_instance ci;
656 n = i * num_siblings + j;
657 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
663 intel_engine_lookup_user(i915, ci.engine_class,
667 "Invalid sibling[%d]: { class:%d, inst:%d }\n",
668 n, ci.engine_class, ci.engine_instance);
674 * We don't support breadcrumb handshake on these
677 if (siblings[n]->class == RENDER_CLASS ||
678 siblings[n]->class == COMPUTE_CLASS) {
684 if (prev_engine.engine_class !=
687 "Mismatched class %d, %d\n",
688 prev_engine.engine_class,
696 current_mask |= siblings[n]->logical_mask;
700 if (current_mask != prev_mask << 1) {
702 "Non contiguous logical mask 0x%x, 0x%x\n",
703 prev_mask, current_mask);
708 prev_mask = current_mask;
711 set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
712 set->engines[slot].num_siblings = num_siblings;
713 set->engines[slot].width = width;
714 set->engines[slot].siblings = siblings;
724 static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
725 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
726 [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
727 [I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] =
728 set_proto_ctx_engines_parallel_submit,
731 static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
732 struct i915_gem_proto_context *pc,
733 const struct drm_i915_gem_context_param *args)
735 struct drm_i915_private *i915 = fpriv->dev_priv;
736 struct set_proto_ctx_engines set = { .i915 = i915 };
737 struct i915_context_param_engines __user *user =
738 u64_to_user_ptr(args->value);
743 if (pc->num_user_engines >= 0) {
744 drm_dbg(&i915->drm, "Cannot set engines twice");
748 if (args->size < sizeof(*user) ||
749 !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
750 drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
755 set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
756 /* RING_MASK has no shift so we can use it directly here */
757 if (set.num_engines > I915_EXEC_RING_MASK + 1)
760 set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
764 for (n = 0; n < set.num_engines; n++) {
765 struct i915_engine_class_instance ci;
766 struct intel_engine_cs *engine;
768 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
773 memset(&set.engines[n], 0, sizeof(set.engines[n]));
775 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
776 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
779 engine = intel_engine_lookup_user(i915,
784 "Invalid engine[%d]: { class:%d, instance:%d }\n",
785 n, ci.engine_class, ci.engine_instance);
790 set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
791 set.engines[n].engine = engine;
795 if (!get_user(extensions, &user->extensions))
796 err = i915_user_extensions(u64_to_user_ptr(extensions),
797 set_proto_ctx_engines_extensions,
798 ARRAY_SIZE(set_proto_ctx_engines_extensions),
805 pc->num_user_engines = set.num_engines;
806 pc->user_engines = set.engines;
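/*
 * Illustrative userspace sketch (an addition, not from the original source):
 * the engine map parsed above arrives as a variable-length array of
 * class/instance pairs. Because ctx_setparam() rejects
 * I915_CONTEXT_PARAM_ENGINES on an already finalized context, it is normally
 * supplied through the I915_CONTEXT_CREATE_EXT_SETPARAM create extension,
 * e.g. for two copy engines:
 *
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
 *		.engines = {
 *			{ .engine_class = I915_ENGINE_CLASS_COPY, .engine_instance = 0 },
 *			{ .engine_class = I915_ENGINE_CLASS_COPY, .engine_instance = 1 },
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext_setparam ext = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_ENGINES,
 *			.size = sizeof(engines),
 *			.value = (uintptr_t)&engines,
 *		},
 *	};
 *
 * which is then chained through drm_i915_gem_context_create_ext.extensions
 * with I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS set.
 */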
811 static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
812 struct i915_gem_proto_context *pc,
813 struct drm_i915_gem_context_param *args)
815 struct drm_i915_private *i915 = fpriv->dev_priv;
816 struct drm_i915_gem_context_param_sseu user_sseu;
817 struct intel_sseu *sseu;
820 if (args->size < sizeof(user_sseu))
823 if (GRAPHICS_VER(i915) != 11)
826 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
833 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
836 if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
839 if (pc->num_user_engines >= 0) {
840 int idx = user_sseu.engine.engine_instance;
841 struct i915_gem_proto_engine *pe;
843 if (idx >= pc->num_user_engines)
846 pe = &pc->user_engines[idx];
848 /* Only render engine supports RPCS configuration. */
849 if (pe->engine->class != RENDER_CLASS)
854 /* Only render engine supports RPCS configuration. */
855 if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
858 /* There is only one render engine */
859 if (user_sseu.engine.engine_instance != 0)
862 sseu = &pc->legacy_rcs_sseu;
865 ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
869 args->size = sizeof(user_sseu);
874 static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
875 struct i915_gem_proto_context *pc,
876 struct drm_i915_gem_context_param *args)
880 switch (args->param) {
881 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
884 else if (args->value)
885 pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
887 pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
890 case I915_CONTEXT_PARAM_BANNABLE:
893 else if (!capable(CAP_SYS_ADMIN) && !args->value)
895 else if (args->value)
896 pc->user_flags |= BIT(UCONTEXT_BANNABLE);
897 else if (pc->uses_protected_content)
900 pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
903 case I915_CONTEXT_PARAM_RECOVERABLE:
906 else if (!args->value)
907 pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
908 else if (pc->uses_protected_content)
911 pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
914 case I915_CONTEXT_PARAM_PRIORITY:
915 ret = validate_priority(fpriv->dev_priv, args);
917 pc->sched.priority = args->value;
920 case I915_CONTEXT_PARAM_SSEU:
921 ret = set_proto_ctx_sseu(fpriv, pc, args);
924 case I915_CONTEXT_PARAM_VM:
925 ret = set_proto_ctx_vm(fpriv, pc, args);
928 case I915_CONTEXT_PARAM_ENGINES:
929 ret = set_proto_ctx_engines(fpriv, pc, args);
932 case I915_CONTEXT_PARAM_PERSISTENCE:
935 ret = proto_context_set_persistence(fpriv->dev_priv, pc,
939 case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
940 ret = proto_context_set_protected(fpriv->dev_priv, pc,
944 case I915_CONTEXT_PARAM_NO_ZEROMAP:
945 case I915_CONTEXT_PARAM_BAN_PERIOD:
946 case I915_CONTEXT_PARAM_RINGSIZE:
955 static int intel_context_set_gem(struct intel_context *ce,
956 struct i915_gem_context *ctx,
957 struct intel_sseu sseu)
961 GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
962 RCU_INIT_POINTER(ce->gem_context, ctx);
964 GEM_BUG_ON(intel_context_is_pinned(ce));
965 ce->ring_size = SZ_16K;
968 ce->vm = i915_gem_context_get_eb_vm(ctx);
970 if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
971 intel_engine_has_timeslices(ce->engine) &&
972 intel_engine_has_semaphores(ce->engine))
973 __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
975 if (IS_ENABLED(CONFIG_DRM_I915_REQUEST_TIMEOUT) &&
976 ctx->i915->params.request_timeout_ms) {
977 unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
979 intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
982 /* A valid SSEU has no zero fields */
983 if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
984 ret = intel_context_reconfigure_sseu(ce, sseu);
989 static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
992 struct intel_context *ce = e->engines[count], *child;
994 if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
997 for_each_child(ce, child)
998 intel_context_unpin(child);
999 intel_context_unpin(ce);
1003 static void unpin_engines(struct i915_gem_engines *e)
1005 __unpin_engines(e, e->num_engines);
1008 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
1011 if (!e->engines[count])
1014 intel_context_put(e->engines[count]);
1019 static void free_engines(struct i915_gem_engines *e)
1021 __free_engines(e, e->num_engines);
1024 static void free_engines_rcu(struct rcu_head *rcu)
1026 struct i915_gem_engines *engines =
1027 container_of(rcu, struct i915_gem_engines, rcu);
1029 i915_sw_fence_fini(&engines->fence);
1030 free_engines(engines);
1034 engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
1036 struct i915_gem_engines *engines =
1037 container_of(fence, typeof(*engines), fence);
1040 case FENCE_COMPLETE:
1041 if (!list_empty(&engines->link)) {
1042 struct i915_gem_context *ctx = engines->ctx;
1043 unsigned long flags;
1045 spin_lock_irqsave(&ctx->stale.lock, flags);
1046 list_del(&engines->link);
1047 spin_unlock_irqrestore(&ctx->stale.lock, flags);
1049 i915_gem_context_put(engines->ctx);
1053 init_rcu_head(&engines->rcu);
1054 call_rcu(&engines->rcu, free_engines_rcu);
1061 static struct i915_gem_engines *alloc_engines(unsigned int count)
1063 struct i915_gem_engines *e;
1065 e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
1069 i915_sw_fence_init(&e->fence, engines_notify);
1073 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
1074 struct intel_sseu rcs_sseu)
1076 const struct intel_gt *gt = to_gt(ctx->i915);
1077 struct intel_engine_cs *engine;
1078 struct i915_gem_engines *e, *err;
1079 enum intel_engine_id id;
1081 e = alloc_engines(I915_NUM_ENGINES);
1083 return ERR_PTR(-ENOMEM);
1085 for_each_engine(engine, gt, id) {
1086 struct intel_context *ce;
1087 struct intel_sseu sseu = {};
1090 if (engine->legacy_idx == INVALID_ENGINE)
1093 GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
1094 GEM_BUG_ON(e->engines[engine->legacy_idx]);
1096 ce = intel_context_create(engine);
1102 e->engines[engine->legacy_idx] = ce;
1103 e->num_engines = max(e->num_engines, engine->legacy_idx + 1);
1105 if (engine->class == RENDER_CLASS)
1108 ret = intel_context_set_gem(ce, ctx, sseu);
1123 static int perma_pin_contexts(struct intel_context *ce)
1125 struct intel_context *child;
1126 int i = 0, j = 0, ret;
1128 GEM_BUG_ON(!intel_context_is_parent(ce));
1130 ret = intel_context_pin(ce);
1134 for_each_child(ce, child) {
1135 ret = intel_context_pin(child);
1141 set_bit(CONTEXT_PERMA_PIN, &ce->flags);
1146 intel_context_unpin(ce);
1147 for_each_child(ce, child) {
1149 intel_context_unpin(child);
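/*
 * Build the context's engine array from the user-supplied proto-engines:
 * physical engines map 1:1, balanced sets become a virtual engine, and
 * parallel sets become a parent/child group that is perma-pinned below.
 */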
1157 static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
1158 unsigned int num_engines,
1159 struct i915_gem_proto_engine *pe)
1161 struct i915_gem_engines *e, *err;
1164 e = alloc_engines(num_engines);
1166 return ERR_PTR(-ENOMEM);
1167 e->num_engines = num_engines;
1169 for (n = 0; n < num_engines; n++) {
1170 struct intel_context *ce, *child;
1173 switch (pe[n].type) {
1174 case I915_GEM_ENGINE_TYPE_PHYSICAL:
1175 ce = intel_context_create(pe[n].engine);
1178 case I915_GEM_ENGINE_TYPE_BALANCED:
1179 ce = intel_engine_create_virtual(pe[n].siblings,
1180 pe[n].num_siblings, 0);
1183 case I915_GEM_ENGINE_TYPE_PARALLEL:
1184 ce = intel_engine_create_parallel(pe[n].siblings,
1189 case I915_GEM_ENGINE_TYPE_INVALID:
1191 GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
1202 ret = intel_context_set_gem(ce, ctx, pe->sseu);
1207 for_each_child(ce, child) {
1208 ret = intel_context_set_gem(child, ctx, pe->sseu);
1216 * XXX: Must be done after calling intel_context_set_gem as that
1217 * function changes the ring size. The ring is allocated when
1218 * the context is pinned. If the ring size is changed after
1219 * allocation, the sizes no longer match and the context will hang.
1220 * Presumably with a bit of reordering we
1221 * could move the perma-pin step to the backend function
1222 * intel_engine_create_parallel.
1224 if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
1225 ret = perma_pin_contexts(ce);
1240 static void i915_gem_context_release_work(struct work_struct *work)
1242 struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
1244 struct i915_address_space *vm;
1246 trace_i915_context_free(ctx);
1247 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1250 drm_syncobj_put(ctx->syncobj);
1256 if (ctx->pxp_wakeref)
1257 intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);
1259 mutex_destroy(&ctx->engines_mutex);
1260 mutex_destroy(&ctx->lut_mutex);
1263 mutex_destroy(&ctx->mutex);
1265 kfree_rcu(ctx, rcu);
1268 void i915_gem_context_release(struct kref *ref)
1270 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
1272 queue_work(ctx->i915->wq, &ctx->release_work);
1275 static inline struct i915_gem_engines *
1276 __context_engines_static(const struct i915_gem_context *ctx)
1278 return rcu_dereference_protected(ctx->engines, true);
1281 static void __reset_context(struct i915_gem_context *ctx,
1282 struct intel_engine_cs *engine)
1284 intel_gt_handle_error(engine->gt, engine->mask, 0,
1285 "context closure in %s", ctx->name);
1288 static bool __cancel_engine(struct intel_engine_cs *engine)
1291 * Send a "high priority pulse" down the engine to cause the
1292 * current request to be momentarily preempted. (If it fails to
1293 * be preempted, it will be reset). As we have marked our context
1294 * as banned, any incomplete request, including any currently running, will
1295 * be skipped following the preemption.
1297 * If there is no hangchecking (one of the reasons why we try to
1298 * cancel the context) and no forced preemption, there may be no
1299 * means by which we reset the GPU and evict the persistent hog.
1300 * Ergo if we are unable to inject a preemptive pulse that can
1301 * kill the banned context, we fall back to doing a local reset
1304 return intel_engine_pulse(engine) == 0;
1307 static struct intel_engine_cs *active_engine(struct intel_context *ce)
1309 struct intel_engine_cs *engine = NULL;
1310 struct i915_request *rq;
1312 if (intel_context_has_inflight(ce))
1313 return intel_context_inflight(ce);
1319 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
1320 * to the request to prevent it being transferred to a new timeline
1321 * (and onto a new timeline->requests list).
1324 list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
1327 /* timeline is already completed up to this point? */
1328 if (!i915_request_get_rcu(rq))
1331 /* Check with the backend if the request is inflight */
1333 if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
1334 found = i915_request_active_engine(rq, &engine);
1336 i915_request_put(rq);
1345 static void kill_engines(struct i915_gem_engines *engines, bool ban)
1347 struct i915_gem_engines_iter it;
1348 struct intel_context *ce;
1351 * Map the user's engine back to the actual engines; one virtual
1352 * engine will be mapped to multiple engines, and using ctx->engine[]
1353 * the same engine may have multiple instances in the user's map.
1354 * However, we only care about pending requests, so only include
1355 * engines on which there are incomplete requests.
1357 for_each_gem_engine(ce, engines, it) {
1358 struct intel_engine_cs *engine;
1360 if (ban && intel_context_ban(ce, NULL))
1364 * Check the current active state of this context; if we
1365 * are currently executing on the GPU we need to evict
1366 * ourselves. On the other hand, if we haven't yet been
1367 * submitted to the GPU or if everything is complete,
1368 * we have nothing to do.
1370 engine = active_engine(ce);
1372 /* First attempt to gracefully cancel the context */
1373 if (engine && !__cancel_engine(engine) && ban)
1375 * If we are unable to send a preemptive pulse to bump
1376 * the context from the GPU, we have to resort to a full
1377 * reset. We hope the collateral damage is worth it.
1379 __reset_context(engines->ctx, engine);
1383 static void kill_context(struct i915_gem_context *ctx)
1385 bool ban = (!i915_gem_context_is_persistent(ctx) ||
1386 !ctx->i915->params.enable_hangcheck);
1387 struct i915_gem_engines *pos, *next;
1389 spin_lock_irq(&ctx->stale.lock);
1390 GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
1391 list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
1392 if (!i915_sw_fence_await(&pos->fence)) {
1393 list_del_init(&pos->link);
1397 spin_unlock_irq(&ctx->stale.lock);
1399 kill_engines(pos, ban);
1401 spin_lock_irq(&ctx->stale.lock);
1402 GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
1403 list_safe_reset_next(pos, next, link);
1404 list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
1406 i915_sw_fence_complete(&pos->fence);
1408 spin_unlock_irq(&ctx->stale.lock);
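/*
 * Prepare the engine set of a closing context for release: mark each engine
 * context as closed, arrange for the set's fence to wait until any active
 * context has been scheduled out and retired, and publish the set on
 * ctx->stale so that kill_context() can find it (or kill it immediately if
 * the close raced).
 */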
1411 static void engines_idle_release(struct i915_gem_context *ctx,
1412 struct i915_gem_engines *engines)
1414 struct i915_gem_engines_iter it;
1415 struct intel_context *ce;
1417 INIT_LIST_HEAD(&engines->link);
1419 engines->ctx = i915_gem_context_get(ctx);
1421 for_each_gem_engine(ce, engines, it) {
1424 /* serialises with execbuf */
1425 set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
1426 if (!intel_context_pin_if_active(ce))
1429 /* Wait until context is finally scheduled out and retired */
1430 err = i915_sw_fence_await_active(&engines->fence,
1432 I915_ACTIVE_AWAIT_BARRIER);
1433 intel_context_unpin(ce);
1438 spin_lock_irq(&ctx->stale.lock);
1439 if (!i915_gem_context_is_closed(ctx))
1440 list_add_tail(&engines->link, &ctx->stale.engines);
1441 spin_unlock_irq(&ctx->stale.lock);
1444 if (list_empty(&engines->link)) /* raced, already closed */
1445 kill_engines(engines, true);
1447 i915_sw_fence_commit(&engines->fence);
1450 static void set_closed_name(struct i915_gem_context *ctx)
1454 /* Replace '[]' with '<>' to indicate closed in debug prints */
1456 s = strrchr(ctx->name, '[');
1462 s = strchr(s + 1, ']');
1467 static void context_close(struct i915_gem_context *ctx)
1469 struct i915_address_space *vm;
1471 /* Flush any concurrent set_engines() */
1472 mutex_lock(&ctx->engines_mutex);
1473 unpin_engines(__context_engines_static(ctx));
1474 engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
1475 i915_gem_context_set_closed(ctx);
1476 mutex_unlock(&ctx->engines_mutex);
1478 mutex_lock(&ctx->mutex);
1480 set_closed_name(ctx);
1484 /* i915_vm_close drops the final reference, which is a bit too
1485 * early and could result in surprises with concurrent
1486 * operations racing with this ctx close. Keep a full reference
1493 ctx->file_priv = ERR_PTR(-EBADF);
1496 * The LUT uses the VMA as a backpointer to unref the object,
1497 * so we need to clear the LUT before we close all the VMA (inside
1502 spin_lock(&ctx->i915->gem.contexts.lock);
1503 list_del(&ctx->link);
1504 spin_unlock(&ctx->i915->gem.contexts.lock);
1506 mutex_unlock(&ctx->mutex);
1509 * If the user has disabled hangchecking, we can not be sure that
1510 * the batches will ever complete after the context is closed,
1511 * keeping the context and all resources pinned forever. So in this
1512 * case we opt to forcibly kill off all remaining requests on
1517 i915_gem_context_put(ctx);
1520 static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
1522 if (i915_gem_context_is_persistent(ctx) == state)
1527 * Only contexts that are short-lived [that will expire or be
1528 * reset] are allowed to survive past termination. We require
1529 * hangcheck to ensure that the persistent requests are healthy.
1531 if (!ctx->i915->params.enable_hangcheck)
1534 i915_gem_context_set_persistence(ctx);
1536 /* To cancel a context we use "preempt-to-idle" */
1537 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
1541 * If the cancel fails, we then need to reset, cleanly!
1543 * If the per-engine reset fails, all hope is lost! We resort
1544 * to a full GPU reset in that unlikely case, but realistically
1545 * if the engine could not reset, the full reset does not fare
1546 * much better. The damage has been done.
1548 * However, if we cannot reset an engine by itself, we cannot
1549 * clean up a hanging persistent context without causing
1550 * collateral damage, and we should not pretend we can by
1551 * exposing the interface.
1553 if (!intel_has_reset_engine(to_gt(ctx->i915)))
1556 i915_gem_context_clear_persistence(ctx);
1562 static struct i915_gem_context *
1563 i915_gem_create_context(struct drm_i915_private *i915,
1564 const struct i915_gem_proto_context *pc)
1566 struct i915_gem_context *ctx;
1567 struct i915_address_space *vm = NULL;
1568 struct i915_gem_engines *e;
1572 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1574 return ERR_PTR(-ENOMEM);
1576 kref_init(&ctx->ref);
1578 ctx->sched = pc->sched;
1579 mutex_init(&ctx->mutex);
1580 INIT_LIST_HEAD(&ctx->link);
1581 INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
1583 spin_lock_init(&ctx->stale.lock);
1584 INIT_LIST_HEAD(&ctx->stale.engines);
1587 vm = i915_vm_get(pc->vm);
1588 } else if (HAS_FULL_PPGTT(i915)) {
1589 struct i915_ppgtt *ppgtt;
1591 ppgtt = i915_ppgtt_create(to_gt(i915), 0);
1592 if (IS_ERR(ppgtt)) {
1593 drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
1595 err = PTR_ERR(ppgtt);
1601 ctx->vm = i915_vm_open(vm);
1603 /* i915_vm_open() takes a reference */
1607 mutex_init(&ctx->engines_mutex);
1608 if (pc->num_user_engines >= 0) {
1609 i915_gem_context_set_user_engines(ctx);
1610 e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
1612 i915_gem_context_clear_user_engines(ctx);
1613 e = default_engines(ctx, pc->legacy_rcs_sseu);
1619 RCU_INIT_POINTER(ctx->engines, e);
1621 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
1622 mutex_init(&ctx->lut_mutex);
1624 /* NB: Mark all slices as needing a remap so that when the context first
1625 * loads it will restore whatever remap state already exists. If there
1626 * is no remap info, it will be a NOP. */
1627 ctx->remap_slice = ALL_L3_SLICES(i915);
1629 ctx->user_flags = pc->user_flags;
1631 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
1632 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
1634 if (pc->single_timeline) {
1635 err = drm_syncobj_create(&ctx->syncobj,
1636 DRM_SYNCOBJ_CREATE_SIGNALED,
1642 if (pc->uses_protected_content) {
1643 ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
1644 ctx->uses_protected_content = true;
1647 trace_i915_context_create(ctx);
1655 i915_vm_close(ctx->vm);
1658 return ERR_PTR(err);
1661 static void init_contexts(struct i915_gem_contexts *gc)
1663 spin_lock_init(&gc->lock);
1664 INIT_LIST_HEAD(&gc->list);
1667 void i915_gem_init__contexts(struct drm_i915_private *i915)
1669 init_contexts(&i915->gem.contexts);
1672 static void gem_context_register(struct i915_gem_context *ctx,
1673 struct drm_i915_file_private *fpriv,
1676 struct drm_i915_private *i915 = ctx->i915;
1679 ctx->file_priv = fpriv;
1681 ctx->pid = get_task_pid(current, PIDTYPE_PID);
1682 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
1683 current->comm, pid_nr(ctx->pid));
1685 /* And finally expose ourselves to userspace via the context xarray */
1686 old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
1689 spin_lock(&i915->gem.contexts.lock);
1690 list_add_tail(&ctx->link, &i915->gem.contexts.list);
1691 spin_unlock(&i915->gem.contexts.lock);
1694 int i915_gem_context_open(struct drm_i915_private *i915,
1695 struct drm_file *file)
1697 struct drm_i915_file_private *file_priv = file->driver_priv;
1698 struct i915_gem_proto_context *pc;
1699 struct i915_gem_context *ctx;
1702 mutex_init(&file_priv->proto_context_lock);
1703 xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC);
1705 /* 0 reserved for the default context */
1706 xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1);
1708 /* 0 reserved for invalid/unassigned ppgtt */
1709 xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
1711 pc = proto_context_create(i915, 0);
1717 ctx = i915_gem_create_context(i915, pc);
1718 proto_context_close(i915, pc);
1724 gem_context_register(ctx, file_priv, 0);
1729 xa_destroy(&file_priv->vm_xa);
1730 xa_destroy(&file_priv->context_xa);
1731 xa_destroy(&file_priv->proto_context_xa);
1732 mutex_destroy(&file_priv->proto_context_lock);
1736 void i915_gem_context_close(struct drm_file *file)
1738 struct drm_i915_file_private *file_priv = file->driver_priv;
1739 struct i915_gem_proto_context *pc;
1740 struct i915_address_space *vm;
1741 struct i915_gem_context *ctx;
1744 xa_for_each(&file_priv->proto_context_xa, idx, pc)
1745 proto_context_close(file_priv->dev_priv, pc);
1746 xa_destroy(&file_priv->proto_context_xa);
1747 mutex_destroy(&file_priv->proto_context_lock);
1749 xa_for_each(&file_priv->context_xa, idx, ctx)
1751 xa_destroy(&file_priv->context_xa);
1753 xa_for_each(&file_priv->vm_xa, idx, vm)
1755 xa_destroy(&file_priv->vm_xa);
1758 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
1759 struct drm_file *file)
1761 struct drm_i915_private *i915 = to_i915(dev);
1762 struct drm_i915_gem_vm_control *args = data;
1763 struct drm_i915_file_private *file_priv = file->driver_priv;
1764 struct i915_ppgtt *ppgtt;
1768 if (!HAS_FULL_PPGTT(i915))
1774 ppgtt = i915_ppgtt_create(to_gt(i915), 0);
1776 return PTR_ERR(ppgtt);
1778 if (args->extensions) {
1779 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
1786 err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
1787 xa_limit_32b, GFP_KERNEL);
1791 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1796 i915_vm_put(&ppgtt->vm);
1800 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
1801 struct drm_file *file)
1803 struct drm_i915_file_private *file_priv = file->driver_priv;
1804 struct drm_i915_gem_vm_control *args = data;
1805 struct i915_address_space *vm;
1810 if (args->extensions)
1813 vm = xa_erase(&file_priv->vm_xa, args->vm_id);
1821 static int get_ppgtt(struct drm_i915_file_private *file_priv,
1822 struct i915_gem_context *ctx,
1823 struct drm_i915_gem_context_param *args)
1825 struct i915_address_space *vm;
1829 if (!i915_gem_context_has_full_ppgtt(ctx))
1835 err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1841 GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1849 i915_gem_user_to_context_sseu(struct intel_gt *gt,
1850 const struct drm_i915_gem_context_param_sseu *user,
1851 struct intel_sseu *context)
1853 const struct sseu_dev_info *device = &gt->info.sseu;
1854 struct drm_i915_private *i915 = gt->i915;
1856 /* No zeros in any field. */
1857 if (!user->slice_mask || !user->subslice_mask ||
1858 !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1862 if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1866 * Some future proofing on the types since the uAPI is wider than the
1867 * current internal implementation.
1869 if (overflows_type(user->slice_mask, context->slice_mask) ||
1870 overflows_type(user->subslice_mask, context->subslice_mask) ||
1871 overflows_type(user->min_eus_per_subslice,
1872 context->min_eus_per_subslice) ||
1873 overflows_type(user->max_eus_per_subslice,
1874 context->max_eus_per_subslice))
1877 /* Check validity against hardware. */
1878 if (user->slice_mask & ~device->slice_mask)
1881 if (user->subslice_mask & ~device->subslice_mask[0])
1884 if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1887 context->slice_mask = user->slice_mask;
1888 context->subslice_mask = user->subslice_mask;
1889 context->min_eus_per_subslice = user->min_eus_per_subslice;
1890 context->max_eus_per_subslice = user->max_eus_per_subslice;
1892 /* Part specific restrictions. */
1893 if (GRAPHICS_VER(i915) == 11) {
1894 unsigned int hw_s = hweight8(device->slice_mask);
1895 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1896 unsigned int req_s = hweight8(context->slice_mask);
1897 unsigned int req_ss = hweight8(context->subslice_mask);
1900 * Only full subslice enablement is possible if more than one
1901 * slice is turned on.
1903 if (req_s > 1 && req_ss != hw_ss_per_s)
1907 * If more than four (SScount bitfield limit) subslices are
1908 * requested then the number has to be even.
1910 if (req_ss > 4 && (req_ss & 1))
1914 * If only one slice is enabled and subslice count is below the
1915 * device full enablement, it must be at most half of the all
1916 * available subslices.
1918 if (req_s == 1 && req_ss < hw_ss_per_s &&
1919 req_ss > (hw_ss_per_s / 2))
1922 /* ABI restriction - VME use case only. */
1924 /* All slices or one slice only. */
1925 if (req_s != 1 && req_s != hw_s)
1929 * Half subslices or full enablement only when one slice is
1933 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1936 /* No EU configuration changes. */
1937 if ((user->min_eus_per_subslice !=
1938 device->max_eus_per_subslice) ||
1939 (user->max_eus_per_subslice !=
1940 device->max_eus_per_subslice))
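/*
 * Worked example of the GRAPHICS_VER == 11 restrictions above (an
 * illustration, not from the original source): on a part with a single slice
 * of eight subslices, a one-slice request may enable either all eight
 * subslices or at most four of them; five to seven subslices are rejected,
 * and any count above four must also be even.
 */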
1947 static int set_sseu(struct i915_gem_context *ctx,
1948 struct drm_i915_gem_context_param *args)
1950 struct drm_i915_private *i915 = ctx->i915;
1951 struct drm_i915_gem_context_param_sseu user_sseu;
1952 struct intel_context *ce;
1953 struct intel_sseu sseu;
1954 unsigned long lookup;
1957 if (args->size < sizeof(user_sseu))
1960 if (GRAPHICS_VER(i915) != 11)
1963 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1970 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1974 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1975 lookup |= LOOKUP_USER_INDEX;
1977 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1981 /* Only render engine supports RPCS configuration. */
1982 if (ce->engine->class != RENDER_CLASS) {
1987 ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
1991 ret = intel_context_reconfigure_sseu(ce, sseu);
1995 args->size = sizeof(user_sseu);
1998 intel_context_put(ce);
2003 set_persistence(struct i915_gem_context *ctx,
2004 const struct drm_i915_gem_context_param *args)
2009 return __context_set_persistence(ctx, args->value);
2012 static int set_priority(struct i915_gem_context *ctx,
2013 const struct drm_i915_gem_context_param *args)
2015 struct i915_gem_engines_iter it;
2016 struct intel_context *ce;
2019 err = validate_priority(ctx->i915, args);
2023 ctx->sched.priority = args->value;
2025 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
2026 if (!intel_engine_has_timeslices(ce->engine))
2029 if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
2030 intel_engine_has_semaphores(ce->engine))
2031 intel_context_set_use_semaphores(ce);
2033 intel_context_clear_use_semaphores(ce);
2035 i915_gem_context_unlock_engines(ctx);
2040 static int get_protected(struct i915_gem_context *ctx,
2041 struct drm_i915_gem_context_param *args)
2044 args->value = i915_gem_context_uses_protected_content(ctx);
2049 static int ctx_setparam(struct drm_i915_file_private *fpriv,
2050 struct i915_gem_context *ctx,
2051 struct drm_i915_gem_context_param *args)
2055 switch (args->param) {
2056 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2059 else if (args->value)
2060 i915_gem_context_set_no_error_capture(ctx);
2062 i915_gem_context_clear_no_error_capture(ctx);
2065 case I915_CONTEXT_PARAM_BANNABLE:
2068 else if (!capable(CAP_SYS_ADMIN) && !args->value)
2070 else if (args->value)
2071 i915_gem_context_set_bannable(ctx);
2072 else if (i915_gem_context_uses_protected_content(ctx))
2073 ret = -EPERM; /* can't clear this for protected contexts */
2075 i915_gem_context_clear_bannable(ctx);
2078 case I915_CONTEXT_PARAM_RECOVERABLE:
2081 else if (!args->value)
2082 i915_gem_context_clear_recoverable(ctx);
2083 else if (i915_gem_context_uses_protected_content(ctx))
2084 ret = -EPERM; /* can't set this for protected contexts */
2086 i915_gem_context_set_recoverable(ctx);
2089 case I915_CONTEXT_PARAM_PRIORITY:
2090 ret = set_priority(ctx, args);
2093 case I915_CONTEXT_PARAM_SSEU:
2094 ret = set_sseu(ctx, args);
2097 case I915_CONTEXT_PARAM_PERSISTENCE:
2098 ret = set_persistence(ctx, args);
2101 case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2102 case I915_CONTEXT_PARAM_NO_ZEROMAP:
2103 case I915_CONTEXT_PARAM_BAN_PERIOD:
2104 case I915_CONTEXT_PARAM_RINGSIZE:
2105 case I915_CONTEXT_PARAM_VM:
2106 case I915_CONTEXT_PARAM_ENGINES:
2116 struct i915_gem_proto_context *pc;
2117 struct drm_i915_file_private *fpriv;
2120 static int create_setparam(struct i915_user_extension __user *ext, void *data)
2122 struct drm_i915_gem_context_create_ext_setparam local;
2123 const struct create_ext *arg = data;
2125 if (copy_from_user(&local, ext, sizeof(local)))
2128 if (local.param.ctx_id)
2131 return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
2134 static int invalid_ext(struct i915_user_extension __user *ext, void *data)
2139 static const i915_user_extension_fn create_extensions[] = {
2140 [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
2141 [I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
2144 static bool client_is_banned(struct drm_i915_file_private *file_priv)
2146 return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
2149 static inline struct i915_gem_context *
2150 __context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2152 struct i915_gem_context *ctx;
2155 ctx = xa_load(&file_priv->context_xa, id);
2156 if (ctx && !kref_get_unless_zero(&ctx->ref))
2163 static struct i915_gem_context *
2164 finalize_create_context_locked(struct drm_i915_file_private *file_priv,
2165 struct i915_gem_proto_context *pc, u32 id)
2167 struct i915_gem_context *ctx;
2170 lockdep_assert_held(&file_priv->proto_context_lock);
2172 ctx = i915_gem_create_context(file_priv->dev_priv, pc);
2176 gem_context_register(ctx, file_priv, id);
2178 old = xa_erase(&file_priv->proto_context_xa, id);
2179 GEM_BUG_ON(old != pc);
2180 proto_context_close(file_priv->dev_priv, pc);
2182 /* One for the xarray and one for the caller */
2183 return i915_gem_context_get(ctx);
2186 struct i915_gem_context *
2187 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
2189 struct i915_gem_proto_context *pc;
2190 struct i915_gem_context *ctx;
2192 ctx = __context_lookup(file_priv, id);
2196 mutex_lock(&file_priv->proto_context_lock);
2197 /* Try one more time under the lock */
2198 ctx = __context_lookup(file_priv, id);
2200 pc = xa_load(&file_priv->proto_context_xa, id);
2202 ctx = ERR_PTR(-ENOENT);
2204 ctx = finalize_create_context_locked(file_priv, pc, id);
2206 mutex_unlock(&file_priv->proto_context_lock);
2211 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
2212 struct drm_file *file)
2214 struct drm_i915_private *i915 = to_i915(dev);
2215 struct drm_i915_gem_context_create_ext *args = data;
2216 struct create_ext ext_data;
2220 if (!DRIVER_CAPS(i915)->has_logical_contexts)
2223 if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
2226 ret = intel_gt_terminally_wedged(to_gt(i915));
2230 ext_data.fpriv = file->driver_priv;
2231 if (client_is_banned(ext_data.fpriv)) {
2233 "client %s[%d] banned from creating ctx\n",
2234 current->comm, task_pid_nr(current));
2238 ext_data.pc = proto_context_create(i915, args->flags);
2239 if (IS_ERR(ext_data.pc))
2240 return PTR_ERR(ext_data.pc);
2242 if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2243 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2245 ARRAY_SIZE(create_extensions),
2251 if (GRAPHICS_VER(i915) > 12) {
2252 struct i915_gem_context *ctx;
2254 /* Get ourselves a context ID */
2255 ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
2256 xa_limit_32b, GFP_KERNEL);
2260 ctx = i915_gem_create_context(i915, ext_data.pc);
2266 proto_context_close(i915, ext_data.pc);
2267 gem_context_register(ctx, ext_data.fpriv, id);
2269 ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
2275 drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
2280 proto_context_close(i915, ext_data.pc);
2284 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2285 struct drm_file *file)
2287 struct drm_i915_gem_context_destroy *args = data;
2288 struct drm_i915_file_private *file_priv = file->driver_priv;
2289 struct i915_gem_proto_context *pc;
2290 struct i915_gem_context *ctx;
2298 /* We need to hold the proto-context lock here to prevent races
2299 * with finalize_create_context_locked().
2301 mutex_lock(&file_priv->proto_context_lock);
2302 ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2303 pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
2304 mutex_unlock(&file_priv->proto_context_lock);
2308 GEM_WARN_ON(ctx && pc);
2311 proto_context_close(file_priv->dev_priv, pc);
2319 static int get_sseu(struct i915_gem_context *ctx,
2320 struct drm_i915_gem_context_param *args)
2322 struct drm_i915_gem_context_param_sseu user_sseu;
2323 struct intel_context *ce;
2324 unsigned long lookup;
2327 if (args->size == 0)
2329 else if (args->size < sizeof(user_sseu))
2332 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2339 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2343 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2344 lookup |= LOOKUP_USER_INDEX;
2346 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2350 err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2352 intel_context_put(ce);
2356 user_sseu.slice_mask = ce->sseu.slice_mask;
2357 user_sseu.subslice_mask = ce->sseu.subslice_mask;
2358 user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2359 user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2361 intel_context_unlock_pinned(ce);
2362 intel_context_put(ce);
2364 if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2369 args->size = sizeof(user_sseu);
2374 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2375 struct drm_file *file)
2377 struct drm_i915_file_private *file_priv = file->driver_priv;
2378 struct drm_i915_gem_context_param *args = data;
2379 struct i915_gem_context *ctx;
2380 struct i915_address_space *vm;
2383 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2385 return PTR_ERR(ctx);
2387 switch (args->param) {
2388 case I915_CONTEXT_PARAM_GTT_SIZE:
2390 vm = i915_gem_context_get_eb_vm(ctx);
2391 args->value = vm->total;
2396 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2398 args->value = i915_gem_context_no_error_capture(ctx);
2401 case I915_CONTEXT_PARAM_BANNABLE:
2403 args->value = i915_gem_context_is_bannable(ctx);
2406 case I915_CONTEXT_PARAM_RECOVERABLE:
2408 args->value = i915_gem_context_is_recoverable(ctx);
2411 case I915_CONTEXT_PARAM_PRIORITY:
2413 args->value = ctx->sched.priority;
2416 case I915_CONTEXT_PARAM_SSEU:
2417 ret = get_sseu(ctx, args);
2420 case I915_CONTEXT_PARAM_VM:
2421 ret = get_ppgtt(file_priv, ctx, args);
2424 case I915_CONTEXT_PARAM_PERSISTENCE:
2426 args->value = i915_gem_context_is_persistent(ctx);
2429 case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2430 ret = get_protected(ctx, args);
2433 case I915_CONTEXT_PARAM_NO_ZEROMAP:
2434 case I915_CONTEXT_PARAM_BAN_PERIOD:
2435 case I915_CONTEXT_PARAM_ENGINES:
2436 case I915_CONTEXT_PARAM_RINGSIZE:
2442 i915_gem_context_put(ctx);
2446 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2447 struct drm_file *file)
2449 struct drm_i915_file_private *file_priv = file->driver_priv;
2450 struct drm_i915_gem_context_param *args = data;
2451 struct i915_gem_proto_context *pc;
2452 struct i915_gem_context *ctx;
2455 mutex_lock(&file_priv->proto_context_lock);
2456 ctx = __context_lookup(file_priv, args->ctx_id);
2458 pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
2460 /* Contexts should be finalized inside
2461 * GEM_CONTEXT_CREATE starting with graphics
2464 WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
2465 ret = set_proto_ctx_param(file_priv, pc, args);
2470 mutex_unlock(&file_priv->proto_context_lock);
2473 ret = ctx_setparam(file_priv, ctx, args);
2474 i915_gem_context_put(ctx);
2480 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2481 void *data, struct drm_file *file)
2483 struct drm_i915_private *i915 = to_i915(dev);
2484 struct drm_i915_reset_stats *args = data;
2485 struct i915_gem_context *ctx;
2487 if (args->flags || args->pad)
2490 ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
2492 return PTR_ERR(ctx);
2495 * We opt for unserialised reads here. This may result in tearing
2496 * in the extremely unlikely event of a GPU hang on this context
2497 * as we are querying them. If we need that extra layer of protection,
2498 * we should wrap the hangstats with a seqlock.
2501 if (capable(CAP_SYS_ADMIN))
2502 args->reset_count = i915_reset_count(&i915->gpu_error);
2504 args->reset_count = 0;
2506 args->batch_active = atomic_read(&ctx->guilty_count);
2507 args->batch_pending = atomic_read(&ctx->active_count);
2509 i915_gem_context_put(ctx);
2513 /* GEM context-engines iterator: for_each_gem_engine() */
2514 struct intel_context *
2515 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2517 const struct i915_gem_engines *e = it->engines;
2518 struct intel_context *ctx;
2524 if (it->idx >= e->num_engines)
2527 ctx = e->engines[it->idx++];
2533 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2534 #include "selftests/mock_context.c"
2535 #include "selftests/i915_gem_context.c"
2538 void i915_gem_context_module_exit(void)
2540 kmem_cache_destroy(slab_luts);
2543 int __init i915_gem_context_module_init(void)
2545 slab_luts = KMEM_CACHE(i915_lut_handle, 0);