/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */
/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * from RC6 (GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current to invoke a save of the context we actually care about. In fact, the
 * code could likely be constructed, albeit in a more complicated fashion, to
 * never use the default context, though that limits the driver's ability to
 * swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware, and object
 * tracking works. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context, but it is still active.
 *
 */
#include <linux/log2.h>
#include <linux/nospec.h>

#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_ring.h"

#include "i915_gem_context.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
static struct i915_global_gem_context {
	struct i915_global base;
	struct kmem_cache *slab_luts;
} global;

struct i915_lut_handle *i915_lut_handle_alloc(void)
{
	return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
}

void i915_lut_handle_free(struct i915_lut_handle *lut)
{
	return kmem_cache_free(global.slab_luts, lut);
}
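
/*
 * Unwind the context's handle->vma lookup cache: for every entry in
 * ctx->handles_vma, detach the lut from its object (under the object's
 * lut_lock) and drop the object reference that the open handle kept
 * alive.
 */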
static void lut_close(struct i915_gem_context *ctx)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	mutex_lock(&ctx->lut_mutex);
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_lut_handle *lut;

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		spin_lock(&obj->lut_lock);
		list_for_each_entry(lut, &obj->lut_list, obj_link) {
			if (lut->ctx != ctx)
				continue;

			if (lut->handle != iter.index)
				continue;

			list_del(&lut->obj_link);
			break;
		}
		spin_unlock(&obj->lut_lock);

		if (&lut->obj_link != &obj->lut_list) {
			i915_lut_handle_free(lut);
			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
			i915_vma_close(vma);
			i915_gem_object_put(obj);
		}

		i915_gem_object_put(obj);
	}
	rcu_read_unlock();
	mutex_unlock(&ctx->lut_mutex);
}

static struct intel_context *
lookup_user_engine(struct i915_gem_context *ctx,
		   unsigned long flags,
		   const struct i915_engine_class_instance *ci)
#define LOOKUP_USER_INDEX BIT(0)
{
	int idx;

	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
		return ERR_PTR(-EINVAL);

	if (!i915_gem_context_user_engines(ctx)) {
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(ctx->i915,
						  ci->engine_class,
						  ci->engine_instance);
		if (!engine)
			return ERR_PTR(-EINVAL);

		idx = engine->legacy_idx;
	} else {
		idx = ci->engine_instance;
	}

	return i915_gem_context_get_engine(ctx, idx);
}
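
/*
 * Acquire a strong reference to the context's address space under RCU.
 * ctx->vm may be swapped concurrently (e.g. by I915_CONTEXT_PARAM_VM),
 * so loop until the kref we took and the pointer we read agree.
 */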
static struct i915_address_space *
context_get_vm_rcu(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(!rcu_access_pointer(ctx->vm));

	do {
		struct i915_address_space *vm;

		/*
		 * We do not allow downgrading from full-ppgtt [to a shared
		 * global gtt], so ctx->vm cannot become NULL.
		 */
		vm = rcu_dereference(ctx->vm);
		if (!kref_get_unless_zero(&vm->ref))
			continue;

		/*
		 * This ppgtt may have been reallocated between
		 * the read and the kref, and reassigned to a third
		 * context. In order to avoid inadvertent sharing
		 * of this ppgtt with that third context (and not
		 * src), we have to confirm that we have the same
		 * ppgtt after passing through the strong memory
		 * barrier implied by a successful
		 * kref_get_unless_zero().
		 *
		 * Once we have acquired the current ppgtt of ctx,
		 * we no longer care if it is released from ctx, as
		 * it cannot be reallocated elsewhere.
		 */
		if (vm == rcu_access_pointer(ctx->vm))
			return rcu_pointer_handoff(vm);

		i915_vm_put(vm);
	} while (1);
}
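
/*
 * Bind a freshly created intel_context to its GEM context: apply the
 * GEM context's address space, timeline and scheduling attributes to
 * the engine context, and pick the default ring size if none has been
 * allocated yet.
 */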
static void intel_context_set_gem(struct intel_context *ce,
				  struct i915_gem_context *ctx)
{
	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
	RCU_INIT_POINTER(ce->gem_context, ctx);

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
		ce->ring = __intel_context_ring_size(SZ_16K);

	if (rcu_access_pointer(ctx->vm)) {
		struct i915_address_space *vm;

		rcu_read_lock();
		vm = context_get_vm_rcu(ctx); /* hmm */
		rcu_read_unlock();

		i915_vm_put(ce->vm);
		ce->vm = vm;
	}

	GEM_BUG_ON(ce->timeline);
	if (ctx->timeline)
		ce->timeline = intel_timeline_get(ctx->timeline);

	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
	    intel_engine_has_timeslices(ce->engine))
		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
	while (count--) {
		if (!e->engines[count])
			continue;

		intel_context_put(e->engines[count]);
	}
	kfree(e);
}

static void free_engines(struct i915_gem_engines *e)
{
	__free_engines(e, e->num_engines);
}

static void free_engines_rcu(struct rcu_head *rcu)
{
	struct i915_gem_engines *engines =
		container_of(rcu, struct i915_gem_engines, rcu);

	i915_sw_fence_fini(&engines->fence);
	free_engines(engines);
}
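
/*
 * The engines fence signals in two stages: FENCE_COMPLETE unlinks the
 * set from ctx->stale and drops the context reference it held, then
 * FENCE_FREE releases the engines themselves via RCU once all readers
 * are done.
 */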
static int __i915_sw_fence_call
engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_gem_engines *engines =
		container_of(fence, typeof(*engines), fence);

	switch (state) {
	case FENCE_COMPLETE: /* float the fence to the user */
		if (!list_empty(&engines->link)) {
			struct i915_gem_context *ctx = engines->ctx;
			unsigned long flags;

			spin_lock_irqsave(&ctx->stale.lock, flags);
			list_del(&engines->link);
			spin_unlock_irqrestore(&ctx->stale.lock, flags);
		}
		i915_gem_context_put(engines->ctx);
		break;

	case FENCE_FREE:
		init_rcu_head(&engines->rcu);
		call_rcu(&engines->rcu, free_engines_rcu);
		break;
	}

	return NOTIFY_DONE;
}

static struct i915_gem_engines *alloc_engines(unsigned int count)
{
	struct i915_gem_engines *e;

	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
	if (!e)
		return NULL;

	i915_sw_fence_init(&e->fence, engines_notify);
	return e;
}
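
/*
 * Construct the default engine map, mirroring the legacy uABI ring
 * order (I915_EXEC_RENDER and friends) via engine->legacy_idx.
 */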
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
{
	const struct intel_gt *gt = &ctx->i915->gt;
	struct intel_engine_cs *engine;
	struct i915_gem_engines *e;
	enum intel_engine_id id;

	e = alloc_engines(I915_NUM_ENGINES);
	if (!e)
		return ERR_PTR(-ENOMEM);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		if (engine->legacy_idx == INVALID_ENGINE)
			continue;

		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
		GEM_BUG_ON(e->engines[engine->legacy_idx]);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			__free_engines(e, e->num_engines + 1);
			return ERR_CAST(ce);
		}

		intel_context_set_gem(ce, ctx);

		e->engines[engine->legacy_idx] = ce;
		e->num_engines = max(e->num_engines, engine->legacy_idx);
	}
	e->num_engines++;

	return e;
}

static void i915_gem_context_free(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	spin_lock(&ctx->i915->gem.contexts.lock);
	list_del(&ctx->link);
	spin_unlock(&ctx->i915->gem.contexts.lock);

	mutex_destroy(&ctx->engines_mutex);
	mutex_destroy(&ctx->lut_mutex);

	if (ctx->timeline)
		intel_timeline_put(ctx->timeline);

	put_pid(ctx->pid);
	mutex_destroy(&ctx->mutex);

	kfree_rcu(ctx, rcu);
}

static void contexts_free_all(struct llist_node *list)
{
	struct i915_gem_context *ctx, *cn;

	llist_for_each_entry_safe(ctx, cn, list, free_link)
		i915_gem_context_free(ctx);
}

static void contexts_flush_free(struct i915_gem_contexts *gc)
{
	contexts_free_all(llist_del_all(&gc->free_list));
}

static void contexts_free_worker(struct work_struct *work)
{
	struct i915_gem_contexts *gc =
		container_of(work, typeof(*gc), free_work);

	contexts_flush_free(gc);
}

void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
	struct i915_gem_contexts *gc = &ctx->i915->gem.contexts;

	trace_i915_context_free(ctx);
	if (llist_add(&ctx->free_link, &gc->free_list))
		schedule_work(&gc->free_work);
}

static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines, true);
}

static bool __reset_engine(struct intel_engine_cs *engine)
{
	struct intel_gt *gt = engine->gt;
	bool success = false;

	if (!intel_has_reset_engine(gt))
		return false;

	if (!test_and_set_bit(I915_RESET_ENGINE + engine->id,
			      &gt->reset.flags)) {
		success = intel_engine_reset(engine, NULL) == 0;
		clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
				      &gt->reset.flags);
	}

	return success;
}

static void __reset_context(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	intel_gt_handle_error(engine->gt, engine->mask, 0,
			      "context closure in %s", ctx->name);
}

static bool __cancel_engine(struct intel_engine_cs *engine)
{
	/*
	 * Send a "high priority pulse" down the engine to cause the
	 * current request to be momentarily preempted. (If it fails to
	 * be preempted, it will be reset). As we have marked our context
	 * as banned, any incomplete request, including any running, will
	 * be skipped following the preemption.
	 *
	 * If there is no hangchecking (one of the reasons why we try to
	 * cancel the context) and no forced preemption, there may be no
	 * means by which we reset the GPU and evict the persistent hog.
	 * Ergo if we are unable to inject a preemptive pulse that can
	 * kill the banned context, we fall back to doing a local reset
	 * instead.
	 */
	if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) &&
	    !intel_engine_pulse(engine))
		return true;

	/* If we are unable to send a pulse, try resetting this engine. */
	return __reset_engine(engine);
}
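
/*
 * Determine which physical engine a request is executing on, if any.
 * rq->engine may change under us (virtual engines), so chase the
 * engine's active.lock until the pointer is stable before inspecting
 * the request.
 */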
static struct intel_engine_cs *__active_engine(struct i915_request *rq)
{
	struct intel_engine_cs *engine, *locked;

	/*
	 * Serialise with __i915_request_submit() so that it sees
	 * is-banned?, or we know the request is already inflight.
	 */
	locked = READ_ONCE(rq->engine);
	spin_lock_irq(&locked->active.lock);
	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
		spin_unlock(&locked->active.lock);
		spin_lock(&engine->active.lock);
		locked = engine;
	}

	engine = NULL;
	if (i915_request_is_active(rq) && rq->fence.error != -EIO)
		engine = rq->engine;

	spin_unlock_irq(&locked->active.lock);

	return engine;
}

static struct intel_engine_cs *active_engine(struct intel_context *ce)
{
	struct intel_engine_cs *engine = NULL;
	struct i915_request *rq;

	if (!ce->timeline)
		return NULL;

	mutex_lock(&ce->timeline->mutex);
	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
		if (i915_request_completed(rq))
			break;

		/* Check with the backend if the request is inflight */
		engine = __active_engine(rq);
		if (engine)
			break;
	}
	mutex_unlock(&ce->timeline->mutex);

	return engine;
}

static void kill_engines(struct i915_gem_engines *engines)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	/*
	 * Map the user's engine back to the actual engines; one virtual
	 * engine will be mapped to multiple engines, and using ctx->engine[]
	 * the same engine may have multiple instances in the user's map.
	 * However, we only care about pending requests, so only include
	 * engines on which there are incomplete requests.
	 */
	for_each_gem_engine(ce, engines, it) {
		struct intel_engine_cs *engine;

		if (intel_context_set_banned(ce))
			continue;

		/*
		 * Check the current active state of this context; if we
		 * are currently executing on the GPU we need to evict
		 * ourselves. On the other hand, if we haven't yet been
		 * submitted to the GPU or if everything is complete,
		 * we have nothing to do.
		 */
		engine = active_engine(ce);

		/* First attempt to gracefully cancel the context */
		if (engine && !__cancel_engine(engine))
			/*
			 * If we are unable to send a preemptive pulse to bump
			 * the context from the GPU, we have to resort to a full
			 * reset. We hope the collateral damage is worth it.
			 */
			__reset_context(engines->ctx, engine);
	}
}
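
/*
 * Walk the list of stale engine sets that were replaced while requests
 * were still in flight and kill off any whose fence has not yet
 * signalled, dropping the await we took in engines_idle_release().
 */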
static void kill_stale_engines(struct i915_gem_context *ctx)
{
	struct i915_gem_engines *pos, *next;

	spin_lock_irq(&ctx->stale.lock);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
	list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
		if (!i915_sw_fence_await(&pos->fence)) {
			list_del_init(&pos->link);
			continue;
		}

		spin_unlock_irq(&ctx->stale.lock);

		kill_engines(pos);

		spin_lock_irq(&ctx->stale.lock);
		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
		list_safe_reset_next(pos, next, link);
		list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */

		i915_sw_fence_complete(&pos->fence);
	}
	spin_unlock_irq(&ctx->stale.lock);
}

static void kill_context(struct i915_gem_context *ctx)
{
	kill_stale_engines(ctx);
}
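
/*
 * Arrange for the engine set to be killed, or for its fence to fire,
 * once every currently active engine context has finally been
 * scheduled out: the fence awaits each pinned context and is only
 * committed after the set has been queued on ctx->stale.engines.
 */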
static void engines_idle_release(struct i915_gem_context *ctx,
				 struct i915_gem_engines *engines)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	INIT_LIST_HEAD(&engines->link);

	engines->ctx = i915_gem_context_get(ctx);

	for_each_gem_engine(ce, engines, it) {
		int err;

		/* serialises with execbuf */
		set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
		if (!intel_context_pin_if_active(ce))
			continue;

		/* Wait until context is finally scheduled out and retired */
		err = i915_sw_fence_await_active(&engines->fence,
						 &ce->active,
						 I915_ACTIVE_AWAIT_BARRIER);
		intel_context_unpin(ce);
		if (err)
			goto kill;
	}

	spin_lock_irq(&ctx->stale.lock);
	if (!i915_gem_context_is_closed(ctx))
		list_add_tail(&engines->link, &ctx->stale.engines);
	spin_unlock_irq(&ctx->stale.lock);

kill:
	if (list_empty(&engines->link)) /* raced, already closed */
		kill_engines(engines);

	i915_sw_fence_commit(&engines->fence);
}

static void set_closed_name(struct i915_gem_context *ctx)
{
	char *s;

	/* Replace '[]' with '<>' to indicate closed in debug prints */

	s = strrchr(ctx->name, '[');
	if (!s)
		return;

	*s = '<';

	s = strchr(s + 1, ']');
	if (s)
		*s = '>';
}

static void context_close(struct i915_gem_context *ctx)
{
	struct i915_address_space *vm;

	/* Flush any concurrent set_engines() */
	mutex_lock(&ctx->engines_mutex);
	engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
	i915_gem_context_set_closed(ctx);
	mutex_unlock(&ctx->engines_mutex);

	mutex_lock(&ctx->mutex);

	set_closed_name(ctx);

	vm = i915_gem_context_vm(ctx);
	if (vm)
		i915_vm_close(vm);

	ctx->file_priv = ERR_PTR(-EBADF);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);

	mutex_unlock(&ctx->mutex);

	/*
	 * If the user has disabled hangchecking, we can not be sure that
	 * the batches will ever complete after the context is closed,
	 * keeping the context and all resources pinned forever. So in this
	 * case we opt to forcibly kill off all remaining requests on
	 * context closure.
	 */
	if (!i915_gem_context_is_persistent(ctx) ||
	    !ctx->i915->params.enable_hangcheck)
		kill_context(ctx);

	i915_gem_context_put(ctx);
}
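
/*
 * Flip the persistence flag, subject to the driver being able to clean
 * up after a non-persistent context: hangcheck must be enabled to
 * allow persistence, while preempt-to-idle and per-engine reset must
 * be available to revoke it.
 */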
static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
{
	if (i915_gem_context_is_persistent(ctx) == state)
		return 0;

	if (state) {
		/*
		 * Only contexts that are short-lived [that will expire or be
		 * reset] are allowed to survive past termination. We require
		 * hangcheck to ensure that the persistent requests are healthy.
		 */
		if (!ctx->i915->params.enable_hangcheck)
			return -EINVAL;

		i915_gem_context_set_persistence(ctx);
	} else {
		/* To cancel a context we use "preempt-to-idle" */
		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
			return -ENODEV;

		/*
		 * If the cancel fails, we then need to reset, cleanly!
		 *
		 * If the per-engine reset fails, all hope is lost! We resort
		 * to a full GPU reset in that unlikely case, but realistically
		 * if the engine could not reset, the full reset does not fare
		 * much better. The damage has been done.
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * cleanup a hanging persistent context without causing
		 * collateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(&ctx->i915->gt))
			return -ENODEV;

		i915_gem_context_clear_persistence(ctx);
	}

	return 0;
}

static struct i915_gem_context *
__create_context(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct i915_gem_engines *e;
	int err;
	int i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	ctx->i915 = i915;
	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
	mutex_init(&ctx->mutex);

	spin_lock_init(&ctx->stale.lock);
	INIT_LIST_HEAD(&ctx->stale.engines);

	mutex_init(&ctx->engines_mutex);
	e = default_engines(ctx);
	if (IS_ERR(e)) {
		err = PTR_ERR(e);
		goto err_free;
	}
	RCU_INIT_POINTER(ctx->engines, e);

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
	mutex_init(&ctx->lut_mutex);

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(i915);

	i915_gem_context_set_bannable(ctx);
	i915_gem_context_set_recoverable(ctx);
	__context_set_persistence(ctx, true /* cgroup hook? */);

	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;

	spin_lock(&i915->gem.contexts.lock);
	list_add_tail(&ctx->link, &i915->gem.contexts.list);
	spin_unlock(&i915->gem.contexts.lock);

	return ctx;

err_free:
	kfree(ctx);
	return ERR_PTR(err);
}

static inline struct i915_gem_engines *
__context_engines_await(const struct i915_gem_context *ctx)
{
	struct i915_gem_engines *engines;

	rcu_read_lock();
	do {
		engines = rcu_dereference(ctx->engines);
		GEM_BUG_ON(!engines);

		if (unlikely(!i915_sw_fence_await(&engines->fence)))
			continue;

		if (likely(engines == rcu_access_pointer(ctx->engines)))
			break;

		i915_sw_fence_complete(&engines->fence);
	} while (1);
	rcu_read_unlock();

	return engines;
}
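
/*
 * Apply fn() to every engine context in the currently active engine
 * set, holding the set alive via its fence; stops on the first error.
 */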
static int
context_apply_all(struct i915_gem_context *ctx,
		  int (*fn)(struct intel_context *ce, void *data),
		  void *data)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_engines *e;
	struct intel_context *ce;
	int err = 0;

	e = __context_engines_await(ctx);
	for_each_gem_engine(ce, e, it) {
		err = fn(ce, data);
		if (err)
			break;
	}
	i915_sw_fence_complete(&e->fence);

	return err;
}

static int __apply_ppgtt(struct intel_context *ce, void *vm)
{
	i915_vm_put(ce->vm);
	ce->vm = i915_vm_get(vm);
	return 0;
}

static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
	struct i915_address_space *old;

	old = rcu_replace_pointer(ctx->vm,
				  i915_vm_open(vm),
				  lockdep_is_held(&ctx->mutex));
	GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));

	context_apply_all(ctx, __apply_ppgtt, vm);

	return old;
}

static void __assign_ppgtt(struct i915_gem_context *ctx,
			   struct i915_address_space *vm)
{
	if (vm == rcu_access_pointer(ctx->vm))
		return;

	vm = __set_ppgtt(ctx, vm);
	if (vm)
		i915_vm_close(vm);
}

static void __set_timeline(struct intel_timeline **dst,
			   struct intel_timeline *src)
{
	struct intel_timeline *old = *dst;

	*dst = src ? intel_timeline_get(src) : NULL;

	if (old)
		intel_timeline_put(old);
}

static int __apply_timeline(struct intel_context *ce, void *timeline)
{
	__set_timeline(&ce->timeline, timeline);
	return 0;
}

static void __assign_timeline(struct i915_gem_context *ctx,
			      struct intel_timeline *timeline)
{
	__set_timeline(&ctx->timeline, timeline);
	context_apply_all(ctx, __apply_timeline, timeline);
}

static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
{
	struct i915_gem_context *ctx;

	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
	    !HAS_EXECLISTS(i915))
		return ERR_PTR(-EINVAL);

	/* Reap the stale contexts */
	contexts_flush_free(&i915->gem.contexts);

	ctx = __create_context(i915);
	if (IS_ERR(ctx))
		return ctx;

	if (HAS_FULL_PPGTT(i915)) {
		struct i915_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(&i915->gt);
		if (IS_ERR(ppgtt)) {
			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
				PTR_ERR(ppgtt));
			context_close(ctx);
			return ERR_CAST(ppgtt);
		}

		mutex_lock(&ctx->mutex);
		__assign_ppgtt(ctx, &ppgtt->vm);
		mutex_unlock(&ctx->mutex);

		i915_vm_put(&ppgtt->vm);
	}

	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
		struct intel_timeline *timeline;

		timeline = intel_timeline_create(&i915->gt, NULL);
		if (IS_ERR(timeline)) {
			context_close(ctx);
			return ERR_CAST(timeline);
		}

		__assign_timeline(ctx, timeline);
		intel_timeline_put(timeline);
	}

	trace_i915_context_create(ctx);

	return ctx;
}

static void init_contexts(struct i915_gem_contexts *gc)
{
	spin_lock_init(&gc->lock);
	INIT_LIST_HEAD(&gc->list);

	INIT_WORK(&gc->free_work, contexts_free_worker);
	init_llist_head(&gc->free_list);
}

void i915_gem_init__contexts(struct drm_i915_private *i915)
{
	init_contexts(&i915->gem.contexts);
	drm_dbg(&i915->drm, "%s context support initialized\n",
		DRIVER_CAPS(i915)->has_logical_contexts ?
		"logical" : "fake");
}

void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
{
	flush_work(&i915->gem.contexts.free_work);
	rcu_barrier(); /* and flush the left over RCU frees */
}

static int gem_context_register(struct i915_gem_context *ctx,
				struct drm_i915_file_private *fpriv,
				u32 *id)
{
	struct i915_address_space *vm;
	int ret;

	ctx->file_priv = fpriv;

	mutex_lock(&ctx->mutex);
	vm = i915_gem_context_vm(ctx);
	if (vm)
		WRITE_ONCE(vm->file, fpriv); /* XXX */
	mutex_unlock(&ctx->mutex);

	ctx->pid = get_task_pid(current, PIDTYPE_PID);
	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
		 current->comm, pid_nr(ctx->pid));

	/* And finally expose ourselves to userspace via the idr */
	ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
	if (ret)
		put_pid(fetch_and_zero(&ctx->pid));

	return ret;
}

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int err;
	u32 id;

	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);

	/* 0 reserved for invalid/unassigned ppgtt */
	xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);

	ctx = i915_gem_create_context(i915, 0);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err;
	}

	err = gem_context_register(ctx, file_priv, &id);
	if (err < 0)
		goto err_ctx;

	GEM_BUG_ON(id);
	return 0;

err_ctx:
	context_close(ctx);
err:
	xa_destroy(&file_priv->vm_xa);
	xa_destroy(&file_priv->context_xa);
	return err;
}

void i915_gem_context_close(struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_private *i915 = file_priv->dev_priv;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	unsigned long idx;

	xa_for_each(&file_priv->context_xa, idx, ctx)
		context_close(ctx);
	xa_destroy(&file_priv->context_xa);

	xa_for_each(&file_priv->vm_xa, idx, vm)
		i915_vm_put(vm);
	xa_destroy(&file_priv->vm_xa);

	contexts_flush_free(&i915->gem.contexts);
}

int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_vm_control *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_ppgtt *ppgtt;
	u32 id;
	int err;

	if (!HAS_FULL_PPGTT(i915))
		return -ENODEV;

	if (args->flags)
		return -EINVAL;

	ppgtt = i915_ppgtt_create(&i915->gt);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	ppgtt->vm.file = file_priv;

	if (args->extensions) {
		err = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   NULL, 0,
					   ppgtt);
		if (err)
			goto err_put;
	}

	err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
		       xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_put;

	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
	args->vm_id = id;
	return 0;

err_put:
	i915_vm_put(&ppgtt->vm);
	return err;
}

int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_vm_control *args = data;
	struct i915_address_space *vm;

	if (args->flags)
		return -EINVAL;

	if (args->extensions)
		return -EINVAL;

	vm = xa_erase(&file_priv->vm_xa, args->vm_id);
	if (!vm)
		return -ENOENT;

	i915_vm_put(vm);
	return 0;
}
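
/*
 * A context barrier emits a request on each of the context's engines
 * and runs task() once all of those requests have been retired, i.e.
 * once no earlier request can still be using the old state. It is used
 * below when swapping the ppgtt under a live context.
 */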
struct context_barrier_task {
	struct i915_active base;
	void (*task)(void *data);
	void *data;
};

__i915_active_call
static void cb_retire(struct i915_active *base)
{
	struct context_barrier_task *cb = container_of(base, typeof(*cb), base);

	if (cb->task)
		cb->task(cb->data);

	i915_active_fini(&cb->base);
	kfree(cb);
}

I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
				intel_engine_mask_t engines,
				bool (*skip)(struct intel_context *ce, void *data),
				int (*emit)(struct i915_request *rq, void *data),
				void (*task)(void *data),
				void *data)
{
	struct context_barrier_task *cb;
	struct i915_gem_engines_iter it;
	struct i915_gem_engines *e;
	struct intel_context *ce;
	int err = 0;

	GEM_BUG_ON(!task);

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return -ENOMEM;

	i915_active_init(&cb->base, NULL, cb_retire);
	err = i915_active_acquire(&cb->base);
	if (err) {
		kfree(cb);
		return err;
	}

	e = __context_engines_await(ctx);
	if (!e) {
		i915_active_release(&cb->base);
		return -ENOENT;
	}

	for_each_gem_engine(ce, e, it) {
		struct i915_request *rq;

		if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
				       ce->engine->mask)) {
			err = -ENXIO;
			break;
		}

		if (!(ce->engine->mask & engines))
			continue;

		if (skip && skip(ce, data))
			continue;

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		err = 0;
		if (emit)
			err = emit(rq, data);
		if (err == 0)
			err = i915_active_add_request(&cb->base, rq);

		i915_request_add(rq);
		if (err)
			break;
	}
	i915_sw_fence_complete(&e->fence);

	cb->task = err ? NULL : task; /* caller needs to unwind instead */
	cb->data = data;

	i915_active_release(&cb->base);

	return err;
}

static int get_ppgtt(struct drm_i915_file_private *file_priv,
		     struct i915_gem_context *ctx,
		     struct drm_i915_gem_context_param *args)
{
	struct i915_address_space *vm;
	int err;
	u32 id;

	if (!rcu_access_pointer(ctx->vm))
		return -ENODEV;

	rcu_read_lock();
	vm = context_get_vm_rcu(ctx);
	rcu_read_unlock();
	if (!vm)
		return -ENODEV;

	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_put;

	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
	args->value = id;
	args->size = 0;

err_put:
	i915_vm_put(vm);
	return err;
}

static void set_ppgtt_barrier(void *data)
{
	struct i915_address_space *old = data;

	if (INTEL_GEN(old->i915) < 8)
		gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));

	i915_vm_close(old);
}
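
/*
 * Rewrite the page-directory pointers in the context image from the
 * ring: a single LRI updating PDP0 with the pml4 address for 4-level
 * ppgtt, or posted LRIs for all GEN8_3LVL_PDPES entries on execlists
 * platforms using the 3-level layout.
 */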
static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
	struct i915_address_space *vm = rq->context->vm;
	struct intel_engine_cs *engine = rq->engine;
	u32 base = engine->mmio_base;
	u32 *cs;
	int i;

	if (i915_vm_is_4lvl(vm)) {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
		const dma_addr_t pd_daddr = px_dma(ppgtt->pd);

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		*cs++ = MI_LOAD_REGISTER_IMM(2);

		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
		*cs++ = upper_32_bits(pd_daddr);
		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
		*cs++ = lower_32_bits(pd_daddr);

		*cs++ = MI_NOOP;
		intel_ring_advance(rq, cs);
	} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
		int err;

		/* Magic required to prevent forcewake errors! */
		err = engine->emit_flush(rq, EMIT_INVALIDATE);
		if (err)
			return err;

		cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
		for (i = GEN8_3LVL_PDPES; i--; ) {
			const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
			*cs++ = upper_32_bits(pd_daddr);
			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
			*cs++ = lower_32_bits(pd_daddr);
		}
		*cs++ = MI_NOOP;
		intel_ring_advance(rq, cs);
	}

	return 0;
}

static bool skip_ppgtt_update(struct intel_context *ce, void *data)
{
	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
		return true;

	if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
		return false;

	if (!atomic_read(&ce->pin_count))
		return true;

	/* ppGTT is not part of the legacy context image */
	if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm)))
		return true;

	return false;
}
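
/*
 * Replace the context's address space (I915_CONTEXT_PARAM_VM). The old
 * vm must be flushed out of the context image with a context barrier
 * before it can be released; on failure the old vm is reinstated.
 */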
static int set_ppgtt(struct drm_i915_file_private *file_priv,
		     struct i915_gem_context *ctx,
		     struct drm_i915_gem_context_param *args)
{
	struct i915_address_space *vm, *old;
	int err;

	if (args->size)
		return -EINVAL;

	if (!rcu_access_pointer(ctx->vm))
		return -ENODEV;

	if (upper_32_bits(args->value))
		return -ENOENT;

	rcu_read_lock();
	vm = xa_load(&file_priv->vm_xa, args->value);
	if (vm && !kref_get_unless_zero(&vm->ref))
		vm = NULL;
	rcu_read_unlock();
	if (!vm)
		return -ENOENT;

	err = mutex_lock_interruptible(&ctx->mutex);
	if (err)
		goto out;

	if (i915_gem_context_is_closed(ctx)) {
		err = -ENOENT;
		goto unlock;
	}

	if (vm == rcu_access_pointer(ctx->vm))
		goto unlock;

	old = __set_ppgtt(ctx, vm);

	/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
	lut_close(ctx);

	/*
	 * We need to flush any requests using the current ppgtt before
	 * we release it as the requests do not hold a reference themselves,
	 * only indirectly through the context.
	 */
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_ppgtt_update,
				   emit_ppgtt_update,
				   set_ppgtt_barrier,
				   old);
	if (err) {
		i915_vm_close(__set_ppgtt(ctx, old));
		i915_vm_close(old);
		lut_close(ctx); /* force a rebuild of the old obj:vma cache */
	}

unlock:
	mutex_unlock(&ctx->mutex);
out:
	i915_vm_put(vm);
	return err;
}

static int __apply_ringsize(struct intel_context *ce, void *sz)
{
	return intel_context_set_ring_size(ce, (unsigned long)sz);
}

static int set_ringsize(struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
		return -ENODEV;

	if (args->size)
		return -EINVAL;

	if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE))
		return -EINVAL;

	if (args->value < I915_GTT_PAGE_SIZE)
		return -EINVAL;

	if (args->value > 128 * I915_GTT_PAGE_SIZE)
		return -EINVAL;

	return context_apply_all(ctx,
				 __apply_ringsize,
				 __intel_context_ring_size(args->value));
}

static int __get_ringsize(struct intel_context *ce, void *arg)
{
	long sz;

	sz = intel_context_get_ring_size(ce);
	GEM_BUG_ON(sz > INT_MAX);

	return sz; /* stop on first engine */
}

static int get_ringsize(struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	int sz;

	if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
		return -ENODEV;

	if (args->size)
		return -EINVAL;

	sz = context_apply_all(ctx, __get_ringsize, NULL);
	if (sz < 0)
		return sz;

	args->value = sz;
	return 0;
}

int
i915_gem_user_to_context_sseu(struct intel_gt *gt,
			      const struct drm_i915_gem_context_param_sseu *user,
			      struct intel_sseu *context)
{
	const struct sseu_dev_info *device = &gt->info.sseu;
	struct drm_i915_private *i915 = gt->i915;

	/* No zeros in any field. */
	if (!user->slice_mask || !user->subslice_mask ||
	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
		return -EINVAL;

	/* Max > min. */
	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
		return -EINVAL;

	/*
	 * Some future proofing on the types since the uAPI is wider than the
	 * current internal implementation.
	 */
	if (overflows_type(user->slice_mask, context->slice_mask) ||
	    overflows_type(user->subslice_mask, context->subslice_mask) ||
	    overflows_type(user->min_eus_per_subslice,
			   context->min_eus_per_subslice) ||
	    overflows_type(user->max_eus_per_subslice,
			   context->max_eus_per_subslice))
		return -EINVAL;

	/* Check validity against hardware. */
	if (user->slice_mask & ~device->slice_mask)
		return -EINVAL;

	if (user->subslice_mask & ~device->subslice_mask[0])
		return -EINVAL;

	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
		return -EINVAL;

	context->slice_mask = user->slice_mask;
	context->subslice_mask = user->subslice_mask;
	context->min_eus_per_subslice = user->min_eus_per_subslice;
	context->max_eus_per_subslice = user->max_eus_per_subslice;

	/* Part specific restrictions. */
	if (IS_GEN(i915, 11)) {
		unsigned int hw_s = hweight8(device->slice_mask);
		unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
		unsigned int req_s = hweight8(context->slice_mask);
		unsigned int req_ss = hweight8(context->subslice_mask);

		/*
		 * Only full subslice enablement is possible if more than one
		 * slice is turned on.
		 */
		if (req_s > 1 && req_ss != hw_ss_per_s)
			return -EINVAL;

		/*
		 * If more than four (SScount bitfield limit) subslices are
		 * requested then the number has to be even.
		 */
		if (req_ss > 4 && (req_ss & 1))
			return -EINVAL;

		/*
		 * If only one slice is enabled and subslice count is below the
		 * device full enablement, it must be at most half of the all
		 * available subslices.
		 */
		if (req_s == 1 && req_ss < hw_ss_per_s &&
		    req_ss > (hw_ss_per_s / 2))
			return -EINVAL;

		/* ABI restriction - VME use case only. */

		/* All slices or one slice only. */
		if (req_s != 1 && req_s != hw_s)
			return -EINVAL;

		/*
		 * Half subslices or full enablement only when one slice is
		 * enabled.
		 */
		if (req_s == 1 &&
		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
			return -EINVAL;

		/* No EU configuration changes. */
		if ((user->min_eus_per_subslice !=
		     device->max_eus_per_subslice) ||
		    (user->max_eus_per_subslice !=
		     device->max_eus_per_subslice))
			return -EINVAL;
	}

	return 0;
}

static int set_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	struct intel_sseu sseu;
	unsigned long lookup;
	int ret;

	if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (!IS_GEN(i915, 11))
		return -ENODEV;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/* Only render engine supports RPCS configuration. */
	if (ce->engine->class != RENDER_CLASS) {
		ret = -ENODEV;
		goto out_ce;
	}

	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
	if (ret)
		goto out_ce;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_ce;

	args->size = sizeof(user_sseu);

out_ce:
	intel_context_put(ce);
	return ret;
}

struct set_engines {
	struct i915_gem_context *ctx;
	struct i915_gem_engines *engines;
};

static int
set_engines__load_balance(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_load_balance __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_engines *set = data;
	struct drm_i915_private *i915 = set->ctx->i915;
	struct intel_engine_cs *stack[16];
	struct intel_engine_cs **siblings;
	struct intel_context *ce;
	u16 num_siblings, idx;
	unsigned int n;
	int err;

	if (!HAS_EXECLISTS(i915))
		return -ENODEV;

	if (intel_uc_uses_guc_submission(&i915->gt.uc))
		return -ENODEV; /* not implemented yet */

	if (get_user(idx, &ext->engine_index))
		return -EFAULT;

	if (idx >= set->engines->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			idx, set->engines->num_engines);
		return -EINVAL;
	}

	idx = array_index_nospec(idx, set->engines->num_engines);
	if (set->engines->engines[idx]) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", idx);
		return -EEXIST;
	}

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	err = check_user_mbz(&ext->mbz64);
	if (err)
		return err;

	siblings = stack;
	if (num_siblings > ARRAY_SIZE(stack)) {
		siblings = kmalloc_array(num_siblings,
					 sizeof(*siblings),
					 GFP_KERNEL);
		if (!siblings)
			return -ENOMEM;
	}

	for (n = 0; n < num_siblings; n++) {
		struct i915_engine_class_instance ci;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
			err = -EFAULT;
			goto out_siblings;
		}

		siblings[n] = intel_engine_lookup_user(i915,
						       ci.engine_class,
						       ci.engine_instance);
		if (!siblings[n]) {
			drm_dbg(&i915->drm,
				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			err = -EINVAL;
			goto out_siblings;
		}
	}

	ce = intel_execlists_create_virtual(siblings, n);
	if (IS_ERR(ce)) {
		err = PTR_ERR(ce);
		goto out_siblings;
	}

	intel_context_set_gem(ce, set->ctx);

	if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
		intel_context_put(ce);
		err = -EEXIST;
		goto out_siblings;
	}

out_siblings:
	if (siblings != stack)
		kfree(siblings);

	return err;
}

static int
set_engines__bond(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_bond __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_engines *set = data;
	struct drm_i915_private *i915 = set->ctx->i915;
	struct i915_engine_class_instance ci;
	struct intel_engine_cs *virtual;
	struct intel_engine_cs *master;
	u16 idx, num_bonds;
	int err, n;

	if (get_user(idx, &ext->virtual_index))
		return -EFAULT;

	if (idx >= set->engines->num_engines) {
		drm_dbg(&i915->drm,
			"Invalid index for virtual engine: %d >= %d\n",
			idx, set->engines->num_engines);
		return -EINVAL;
	}

	idx = array_index_nospec(idx, set->engines->num_engines);
	if (!set->engines->engines[idx]) {
		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
		return -EINVAL;
	}
	virtual = set->engines->engines[idx]->engine;

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
		err = check_user_mbz(&ext->mbz64[n]);
		if (err)
			return err;
	}

	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
		return -EFAULT;

	master = intel_engine_lookup_user(i915,
					  ci.engine_class, ci.engine_instance);
	if (!master) {
		drm_dbg(&i915->drm,
			"Unrecognised master engine: { class:%u, instance:%u }\n",
			ci.engine_class, ci.engine_instance);
		return -EINVAL;
	}

	if (get_user(num_bonds, &ext->num_bonds))
		return -EFAULT;

	for (n = 0; n < num_bonds; n++) {
		struct intel_engine_cs *bond;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
			return -EFAULT;

		bond = intel_engine_lookup_user(i915,
						ci.engine_class,
						ci.engine_instance);
		if (!bond) {
			drm_dbg(&i915->drm,
				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
				n, ci.engine_class, ci.engine_instance);
			return -EINVAL;
		}

		/*
		 * A non-virtual engine has no siblings to choose between; and
		 * a submit fence will always be directed to the one engine.
		 */
		if (intel_engine_is_virtual(virtual)) {
			err = intel_virtual_engine_attach_bond(virtual,
							       master,
							       bond);
			if (err)
				return err;
		}
	}

	return 0;
}

static const i915_user_extension_fn set_engines__extensions[] = {
	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
	[I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
};
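
/*
 * Replace the context's engine map (I915_CONTEXT_PARAM_ENGINES) with a
 * user supplied array of class:instance pairs, or revert to the legacy
 * ring map when size == 0. The old set is handed to
 * engines_idle_release() so that stale engines can still be killed on
 * context closure.
 */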
static int
set_engines(struct i915_gem_context *ctx,
	    const struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_context_param_engines __user *user =
		u64_to_user_ptr(args->value);
	struct set_engines set = { .ctx = ctx };
	unsigned int num_engines, n;
	u64 extensions;
	int err;

	if (!args->size) { /* switch back to legacy user_ring_map */
		if (!i915_gem_context_user_engines(ctx))
			return 0;

		set.engines = default_engines(ctx);
		if (IS_ERR(set.engines))
			return PTR_ERR(set.engines);

		goto replace;
	}

	BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
	if (args->size < sizeof(*user) ||
	    !IS_ALIGNED(args->size, sizeof(*user->engines))) {
		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
			args->size);
		return -EINVAL;
	}

	/*
	 * Note that I915_EXEC_RING_MASK limits execbuf to only using the
	 * first 64 engines defined here.
	 */
	num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
	set.engines = alloc_engines(num_engines);
	if (!set.engines)
		return -ENOMEM;

	for (n = 0; n < num_engines; n++) {
		struct i915_engine_class_instance ci;
		struct intel_engine_cs *engine;
		struct intel_context *ce;

		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
			__free_engines(set.engines, n);
			return -EFAULT;
		}

		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
			set.engines->engines[n] = NULL;
			continue;
		}

		engine = intel_engine_lookup_user(ctx->i915,
						  ci.engine_class,
						  ci.engine_instance);
		if (!engine) {
			drm_dbg(&i915->drm,
				"Invalid engine[%d]: { class:%d, instance:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			__free_engines(set.engines, n);
			return -ENOENT;
		}

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			__free_engines(set.engines, n);
			return PTR_ERR(ce);
		}

		intel_context_set_gem(ce, ctx);

		set.engines->engines[n] = ce;
	}
	set.engines->num_engines = num_engines;

	err = -EFAULT;
	if (!get_user(extensions, &user->extensions))
		err = i915_user_extensions(u64_to_user_ptr(extensions),
					   set_engines__extensions,
					   ARRAY_SIZE(set_engines__extensions),
					   &set);
	if (err) {
		free_engines(set.engines);
		return err;
	}

replace:
	mutex_lock(&ctx->engines_mutex);
	if (i915_gem_context_is_closed(ctx)) {
		mutex_unlock(&ctx->engines_mutex);
		free_engines(set.engines);
		return -ENOENT;
	}
	if (args->size)
		i915_gem_context_set_user_engines(ctx);
	else
		i915_gem_context_clear_user_engines(ctx);
	set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
	mutex_unlock(&ctx->engines_mutex);

	/* Keep track of old engine sets for kill_context() */
	engines_idle_release(ctx, set.engines);

	return 0;
}

static struct i915_gem_engines *
__copy_engines(struct i915_gem_engines *e)
{
	struct i915_gem_engines *copy;
	unsigned int n;

	copy = alloc_engines(e->num_engines);
	if (!copy)
		return ERR_PTR(-ENOMEM);

	for (n = 0; n < e->num_engines; n++) {
		if (e->engines[n])
			copy->engines[n] = intel_context_get(e->engines[n]);
		else
			copy->engines[n] = NULL;
	}
	copy->num_engines = n;

	return copy;
}

static int
get_engines(struct i915_gem_context *ctx,
	    struct drm_i915_gem_context_param *args)
{
	struct i915_context_param_engines __user *user;
	struct i915_gem_engines *e;
	size_t n, count, size;
	int err = 0;

	err = mutex_lock_interruptible(&ctx->engines_mutex);
	if (err)
		return err;

	e = NULL;
	if (i915_gem_context_user_engines(ctx))
		e = __copy_engines(i915_gem_context_engines(ctx));
	mutex_unlock(&ctx->engines_mutex);
	if (IS_ERR_OR_NULL(e)) {
		args->size = 0;
		return PTR_ERR_OR_ZERO(e);
	}

	count = e->num_engines;

	/* Be paranoid in case we have an impedance mismatch */
	if (!check_struct_size(user, engines, count, &size)) {
		err = -EINVAL;
		goto err_free;
	}
	if (overflows_type(size, args->size)) {
		err = -EINVAL;
		goto err_free;
	}

	if (!args->size) {
		args->size = size;
		goto err_free;
	}

	if (args->size < size) {
		err = -EINVAL;
		goto err_free;
	}

	user = u64_to_user_ptr(args->value);
	if (put_user(0, &user->extensions)) {
		err = -EFAULT;
		goto err_free;
	}

	for (n = 0; n < count; n++) {
		struct i915_engine_class_instance ci = {
			.engine_class = I915_ENGINE_CLASS_INVALID,
			.engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
		};

		if (e->engines[n]) {
			ci.engine_class = e->engines[n]->engine->uabi_class;
			ci.engine_instance = e->engines[n]->engine->uabi_instance;
		}

		if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
			err = -EFAULT;
			goto err_free;
		}
	}

	args->size = size;

err_free:
	free_engines(e);
	return err;
}

static int
set_persistence(struct i915_gem_context *ctx,
		const struct drm_i915_gem_context_param *args)
{
	if (args->size)
		return -EINVAL;

	return __context_set_persistence(ctx, args->value);
}

static int __apply_priority(struct intel_context *ce, void *arg)
{
	struct i915_gem_context *ctx = arg;

	if (!intel_engine_has_timeslices(ce->engine))
		return 0;

	if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
		intel_context_set_use_semaphores(ce);
	else
		intel_context_clear_use_semaphores(ce);

	return 0;
}

static int set_priority(struct i915_gem_context *ctx,
			const struct drm_i915_gem_context_param *args)
{
	s64 priority = args->value;

	if (args->size)
		return -EINVAL;

	if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
		return -ENODEV;

	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
		return -EINVAL;

	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
	    !capable(CAP_SYS_NICE))
		return -EPERM;

	ctx->sched.priority = I915_USER_PRIORITY(priority);
	context_apply_all(ctx, __apply_priority, ctx);

	return 0;
}

static int ctx_setparam(struct drm_i915_file_private *fpriv,
			struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	int ret = 0;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		else
			clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else
			i915_gem_context_clear_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_recoverable(ctx);
		else
			i915_gem_context_clear_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		ret = set_priority(ctx, args);
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = set_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = set_ppgtt(fpriv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = set_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		ret = set_persistence(ctx, args);
		break;

	case I915_CONTEXT_PARAM_RINGSIZE:
		ret = set_ringsize(ctx, args);
		break;

	case I915_CONTEXT_PARAM_BAN_PERIOD:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

struct create_ext {
	struct i915_gem_context *ctx;
	struct drm_i915_file_private *fpriv;
};

static int create_setparam(struct i915_user_extension __user *ext, void *data)
{
	struct drm_i915_gem_context_create_ext_setparam local;
	const struct create_ext *arg = data;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	if (local.param.ctx_id)
		return -EINVAL;

	return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
}

static int copy_ring_size(struct intel_context *dst,
			  struct intel_context *src)
{
	long sz;

	sz = intel_context_get_ring_size(src);
	if (sz < 0)
		return sz;

	return intel_context_set_ring_size(dst, sz);
}

static int clone_engines(struct i915_gem_context *dst,
			 struct i915_gem_context *src)
{
	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
	struct i915_gem_engines *clone;
	bool user_engines;
	unsigned long n;

	clone = alloc_engines(e->num_engines);
	if (!clone)
		goto err_unlock;

	for (n = 0; n < e->num_engines; n++) {
		struct intel_engine_cs *engine;

		if (!e->engines[n]) {
			clone->engines[n] = NULL;
			continue;
		}
		engine = e->engines[n]->engine;

		/*
		 * Virtual engines are singletons; they can only exist
		 * inside a single context, because they embed their
		 * HW context... As each virtual context implies a single
		 * timeline (each engine can only dequeue a single request
		 * at any time), it would be surprising for two contexts
		 * to use the same engine. So let's create a copy of
		 * the virtual engine instead.
		 */
		if (intel_engine_is_virtual(engine))
			clone->engines[n] =
				intel_execlists_clone_virtual(engine);
		else
			clone->engines[n] = intel_context_create(engine);
		if (IS_ERR_OR_NULL(clone->engines[n])) {
			__free_engines(clone, n);
			goto err_unlock;
		}

		intel_context_set_gem(clone->engines[n], dst);

		/* Copy across the preferred ringsize */
		if (copy_ring_size(clone->engines[n], e->engines[n])) {
			__free_engines(clone, n + 1);
			goto err_unlock;
		}
	}
	clone->num_engines = n;

	user_engines = i915_gem_context_user_engines(src);
	i915_gem_context_unlock_engines(src);

	/* Serialised by constructor */
	engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
	if (user_engines)
		i915_gem_context_set_user_engines(dst);
	else
		i915_gem_context_clear_user_engines(dst);
	return 0;

err_unlock:
	i915_gem_context_unlock_engines(src);
	return -ENOMEM;
}

static int clone_flags(struct i915_gem_context *dst,
		       struct i915_gem_context *src)
{
	dst->user_flags = src->user_flags;
	return 0;
}

static int clone_schedattr(struct i915_gem_context *dst,
			   struct i915_gem_context *src)
{
	dst->sched = src->sched;
	return 0;
}

static int clone_sseu(struct i915_gem_context *dst,
		      struct i915_gem_context *src)
{
	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
	struct i915_gem_engines *clone;
	unsigned long n;
	int err;

	/* no locking required; sole access under constructor */
	clone = __context_engines_static(dst);
	if (e->num_engines != clone->num_engines) {
		err = -EINVAL;
		goto unlock;
	}

	for (n = 0; n < e->num_engines; n++) {
		struct intel_context *ce = e->engines[n];

		if (clone->engines[n]->engine->class != ce->engine->class) {
			/* Must have compatible engine maps! */
			err = -EINVAL;
			goto unlock;
		}

		/* serialises with set_sseu */
		err = intel_context_lock_pinned(ce);
		if (err)
			goto unlock;

		clone->engines[n]->sseu = ce->sseu;
		intel_context_unlock_pinned(ce);
	}

	err = 0;
unlock:
	i915_gem_context_unlock_engines(src);
	return err;
}

static int clone_timeline(struct i915_gem_context *dst,
			  struct i915_gem_context *src)
{
	if (src->timeline)
		__assign_timeline(dst, src->timeline);

	return 0;
}

static int clone_vm(struct i915_gem_context *dst,
		    struct i915_gem_context *src)
{
	struct i915_address_space *vm;
	int err = 0;

	if (!rcu_access_pointer(src->vm))
		return 0;

	rcu_read_lock();
	vm = context_get_vm_rcu(src);
	rcu_read_unlock();

	if (!mutex_lock_interruptible(&dst->mutex)) {
		__assign_ppgtt(dst, vm);
		mutex_unlock(&dst->mutex);
	} else {
		err = -EINTR;
	}

	i915_vm_put(vm);
	return err;
}

static int create_clone(struct i915_user_extension __user *ext, void *data)
{
	static int (* const fn[])(struct i915_gem_context *dst,
				  struct i915_gem_context *src) = {
#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
		MAP(ENGINES, clone_engines),
		MAP(FLAGS, clone_flags),
		MAP(SCHEDATTR, clone_schedattr),
		MAP(SSEU, clone_sseu),
		MAP(TIMELINE, clone_timeline),
		MAP(VM, clone_vm),
#undef MAP
	};
	struct drm_i915_gem_context_create_ext_clone local;
	const struct create_ext *arg = data;
	struct i915_gem_context *dst = arg->ctx;
	struct i915_gem_context *src;
	int err, bit;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
		     I915_CONTEXT_CLONE_UNKNOWN);

	if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
		return -EINVAL;

	if (local.rsvd)
		return -EINVAL;

	rcu_read_lock();
	src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
	rcu_read_unlock();
	if (!src)
		return -ENOENT;

	GEM_BUG_ON(src == dst);

	for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
		if (!(local.flags & BIT(bit)))
			continue;

		err = fn[bit](dst, src);
		if (err)
			return err;
	}

	return 0;
}

static const i915_user_extension_fn create_extensions[] = {
	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
	[I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
};

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;
	u32 id;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(&i915->gt);
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
		drm_dbg(&i915->drm,
			"client %s[%d] banned from creating ctx\n",
			current->comm, task_pid_nr(current));
		return -EIO;
	}

	ext_data.ctx = i915_gem_create_context(i915, args->flags);
	if (IS_ERR(ext_data.ctx))
		return PTR_ERR(ext_data.ctx);

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   create_extensions,
					   ARRAY_SIZE(create_extensions),
					   &ext_data);
		if (ret)
			goto err_ctx;
	}

	ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
	if (ret < 0)
		goto err_ctx;

	args->ctx_id = id;
	drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);

	return 0;

err_ctx:
	context_close(ext_data.ctx);
	return ret;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	if (args->pad != 0)
		return -EINVAL;

	if (!args->ctx_id)
		return -ENOENT;

	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	context_close(ctx);
	return 0;
}

static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->size = 0;
		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;

	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		rcu_read_lock();
		if (rcu_access_pointer(ctx->vm))
			args->value = rcu_dereference(ctx->vm)->total;
		else
			args->value = to_i915(dev)->ggtt.vm.total;
		rcu_read_unlock();
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = get_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_RINGSIZE:
		ret = get_ringsize(ctx, args);
		break;

	case I915_CONTEXT_PARAM_BAN_PERIOD:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = ctx_setparam(file_priv, ctx, args);

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
{
	const struct i915_gem_engines *e = it->engines;
	struct intel_context *ctx;

	if (unlikely(!e))
		return NULL;

	do {
		if (it->idx >= e->num_engines)
			return NULL;

		ctx = e->engines[it->idx++];
	} while (!ctx);

	return ctx;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif

static void i915_global_gem_context_shrink(void)
{
	kmem_cache_shrink(global.slab_luts);
}

static void i915_global_gem_context_exit(void)
{
	kmem_cache_destroy(global.slab_luts);
}

static struct i915_global_gem_context global = { {
	.shrink = i915_global_gem_context_shrink,
	.exit = i915_global_gem_context_exit,
} };

int __init i915_global_gem_context_init(void)
{
	global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!global.slab_luts)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}