/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 * All other contexts are created at the request of a GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits an execbuf with the context
 * S2->S3: another client submits an execbuf with the context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called on the current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on the current context
 * There are two confusing terms used above:
 * The "current context" means the context which is currently running on the
 * GPU. The GPU has loaded its state already and has stored away the gtt
 * offset of the BO. The GPU is not actively referencing the data at this
 * offset, but it will on the next context switch. The only way to avoid this
 * is to do a GPU reset.
 *
 * An "active context" is one which was previously the "current context" and is
 * on the active list waiting for the next context switch to occur. Until this
 * happens, the object must remain at the same gtt offset. It is therefore
 * possible to destroy a context even though it is still active.
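 *
 * As a hedged illustration (not driver code), the states above map onto
 * operations roughly like so; i915_gem_create_context() and context_close()
 * are the real entry points, the trailing comments give the S-state steps:
 *
 *	ctx = i915_gem_create_context(i915, 0);	// S0->S1 (refcount = 1)
 *	// client execbuf with ctx:		   S1->S2 (referenced + pinned)
 *	// another context becomes current:	   S2->S3 (active, unpinned)
 *	context_close(ctx);			// S3->S5, then S5->S0 on retire
 */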
#include <linux/log2.h>
#include <linux/nospec.h>

#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_execlists_submission.h" /* virtual_engine */
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"

#include "i915_gem_context.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
static struct i915_global_gem_context {
	struct i915_global base;
	struct kmem_cache *slab_luts;
} global;

struct i915_lut_handle *i915_lut_handle_alloc(void)
{
	return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
}

void i915_lut_handle_free(struct i915_lut_handle *lut)
{
	kmem_cache_free(global.slab_luts, lut);
}
static void lut_close(struct i915_gem_context *ctx)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	mutex_lock(&ctx->lut_mutex);
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_lut_handle *lut;

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		spin_lock(&obj->lut_lock);
		list_for_each_entry(lut, &obj->lut_list, obj_link) {
			if (lut->handle != iter.index)
				continue;

			list_del(&lut->obj_link);
			break;
		}
		spin_unlock(&obj->lut_lock);

		if (&lut->obj_link != &obj->lut_list) {
			i915_lut_handle_free(lut);
			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
			i915_gem_object_put(obj);
		}

		i915_gem_object_put(obj);
	}
	mutex_unlock(&ctx->lut_mutex);
}
static struct intel_context *
lookup_user_engine(struct i915_gem_context *ctx,
		   unsigned long flags,
		   const struct i915_engine_class_instance *ci)
#define LOOKUP_USER_INDEX BIT(0)
{
	int idx;

	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
		return ERR_PTR(-EINVAL);

	if (!i915_gem_context_user_engines(ctx)) {
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(ctx->i915,
						  ci->engine_class,
						  ci->engine_instance);
		if (!engine)
			return ERR_PTR(-EINVAL);
		idx = engine->legacy_idx;
	} else {
		idx = ci->engine_instance;
	}

	return i915_gem_context_get_engine(ctx, idx);
}
static struct i915_address_space *
context_get_vm_rcu(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(!rcu_access_pointer(ctx->vm));

	do {
		struct i915_address_space *vm;

		/*
		 * We do not allow downgrading from full-ppgtt [to a shared
		 * global gtt], so ctx->vm cannot become NULL.
		 */
		vm = rcu_dereference(ctx->vm);
		if (!kref_get_unless_zero(&vm->ref))
			continue;

		/*
		 * This ppgtt may have been reallocated between
		 * the read and the kref, and reassigned to a third
		 * context. In order to avoid inadvertent sharing
		 * of this ppgtt with that third context (and not
		 * src), we have to confirm that we have the same
		 * ppgtt after passing through the strong memory
		 * barrier implied by a successful
		 * kref_get_unless_zero().
		 *
		 * Once we have acquired the current ppgtt of ctx,
		 * we no longer care if it is released from ctx, as
		 * it cannot be reallocated elsewhere.
		 */
		if (vm == rcu_access_pointer(ctx->vm))
			return rcu_pointer_handoff(vm);

		i915_vm_put(vm);
	} while (1);
}
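/*
 * The retry loop above is the standard RCU + kref_get_unless_zero() idiom for
 * safely acquiring a reference to an RCU-protected object. A minimal, generic
 * sketch of the same technique (illustrative only; "obj", "owner->obj" and
 * obj_release are stand-ins, not driver structures):
 *
 *	rcu_read_lock();
 *	do {
 *		obj = rcu_dereference(owner->obj);
 *		if (!kref_get_unless_zero(&obj->ref))
 *			continue;	// freed under us: reload and retry
 *		if (obj == rcu_access_pointer(owner->obj))
 *			break;		// still the current object: ref is ours
 *		kref_put(&obj->ref, obj_release);	// recycled: retry
 *	} while (1);
 *	rcu_read_unlock();
 */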
static void intel_context_set_gem(struct intel_context *ce,
				  struct i915_gem_context *ctx)
{
	GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
	RCU_INIT_POINTER(ce->gem_context, ctx);

	ce->ring_size = SZ_16K;

	if (rcu_access_pointer(ctx->vm)) {
		struct i915_address_space *vm;

		rcu_read_lock();
		vm = context_get_vm_rcu(ctx); /* hmm */
		rcu_read_unlock();

		i915_vm_put(ce->vm);
		ce->vm = vm;
	}

	GEM_BUG_ON(ce->timeline);
	if (ctx->timeline)
		ce->timeline = intel_timeline_get(ctx->timeline);

	if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
	    intel_engine_has_timeslices(ce->engine))
		__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);

	if (IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) &&
	    ctx->i915->params.request_timeout_ms) {
		unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;

		intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
	}
}
static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
	while (count--) {
		if (!e->engines[count])
			continue;

		intel_context_put(e->engines[count]);
	}
	kfree(e);
}

static void free_engines(struct i915_gem_engines *e)
{
	__free_engines(e, e->num_engines);
}

static void free_engines_rcu(struct rcu_head *rcu)
{
	struct i915_gem_engines *engines =
		container_of(rcu, struct i915_gem_engines, rcu);

	i915_sw_fence_fini(&engines->fence);
	free_engines(engines);
}

static int __i915_sw_fence_call
engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_gem_engines *engines =
		container_of(fence, typeof(*engines), fence);

	switch (state) {
	case FENCE_COMPLETE:
		if (!list_empty(&engines->link)) {
			struct i915_gem_context *ctx = engines->ctx;
			unsigned long flags;

			spin_lock_irqsave(&ctx->stale.lock, flags);
			list_del(&engines->link);
			spin_unlock_irqrestore(&ctx->stale.lock, flags);
		}
		i915_gem_context_put(engines->ctx);
		break;

	case FENCE_FREE:
		init_rcu_head(&engines->rcu);
		call_rcu(&engines->rcu, free_engines_rcu);
		break;
	}

	return NOTIFY_DONE;
}
static struct i915_gem_engines *alloc_engines(unsigned int count)
{
	struct i915_gem_engines *e;

	e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
	if (!e)
		return NULL;

	i915_sw_fence_init(&e->fence, engines_notify);
	return e;
}
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
{
	const struct intel_gt *gt = &ctx->i915->gt;
	struct intel_engine_cs *engine;
	struct i915_gem_engines *e;
	enum intel_engine_id id;

	e = alloc_engines(I915_NUM_ENGINES);
	if (!e)
		return ERR_PTR(-ENOMEM);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		if (engine->legacy_idx == INVALID_ENGINE)
			continue;

		GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
		GEM_BUG_ON(e->engines[engine->legacy_idx]);

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			__free_engines(e, e->num_engines + 1);
			return ERR_CAST(ce);
		}

		intel_context_set_gem(ce, ctx);

		e->engines[engine->legacy_idx] = ce;
		e->num_engines = max(e->num_engines, engine->legacy_idx);
	}
	e->num_engines++;

	return e;
}
void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);

	trace_i915_context_free(ctx);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	mutex_destroy(&ctx->engines_mutex);
	mutex_destroy(&ctx->lut_mutex);

	if (ctx->timeline)
		intel_timeline_put(ctx->timeline);

	mutex_destroy(&ctx->mutex);
}
static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
	return rcu_dereference_protected(ctx->engines, true);
}

static void __reset_context(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine)
{
	intel_gt_handle_error(engine->gt, engine->mask, 0,
			      "context closure in %s", ctx->name);
}
static bool __cancel_engine(struct intel_engine_cs *engine)
{
	/*
	 * Send a "high priority pulse" down the engine to cause the
	 * current request to be momentarily preempted. (If it fails to
	 * be preempted, it will be reset). As we have marked our context
	 * as banned, any incomplete request, including any running, will
	 * be skipped following the preemption.
	 *
	 * If there is no hangchecking (one of the reasons why we try to
	 * cancel the context) and no forced preemption, there may be no
	 * means by which we reset the GPU and evict the persistent hog.
	 * Ergo if we are unable to inject a preemptive pulse that can
	 * kill the banned context, we fall back to doing a local reset
	 * instead.
	 */
	return intel_engine_pulse(engine) == 0;
}
static struct intel_engine_cs *active_engine(struct intel_context *ce)
{
	struct intel_engine_cs *engine = NULL;
	struct i915_request *rq;

	if (intel_context_has_inflight(ce))
		return intel_context_inflight(ce);

	if (!ce->timeline)
		return NULL;

	/*
	 * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
	 * to the request to prevent it being transferred to a new timeline
	 * (and onto a new timeline->requests list).
	 */
	rcu_read_lock();
	list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
		bool found;

		/* timeline is already completed up to this point? */
		if (!i915_request_get_rcu(rq))
			break;

		/* Check with the backend if the request is inflight */
		found = false;
		if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
			found = i915_request_active_engine(rq, &engine);

		i915_request_put(rq);
		if (found)
			break;
	}
	rcu_read_unlock();

	return engine;
}
static void kill_engines(struct i915_gem_engines *engines, bool ban)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	/*
	 * Map the user's engine back to the actual engines; one virtual
	 * engine will be mapped to multiple engines, and using ctx->engine[]
	 * the same engine may have multiple instances in the user's map.
	 * However, we only care about pending requests, so only include
	 * engines on which there are incomplete requests.
	 */
	for_each_gem_engine(ce, engines, it) {
		struct intel_engine_cs *engine;

		if (ban && intel_context_set_banned(ce))
			continue;

		/*
		 * Check the current active state of this context; if we
		 * are currently executing on the GPU we need to evict
		 * ourselves. On the other hand, if we haven't yet been
		 * submitted to the GPU or if everything is complete,
		 * we have nothing to do.
		 */
		engine = active_engine(ce);

		/* First attempt to gracefully cancel the context */
		if (engine && !__cancel_engine(engine) && ban)
			/*
			 * If we are unable to send a preemptive pulse to bump
			 * the context from the GPU, we have to resort to a full
			 * reset. We hope the collateral damage is worth it.
			 */
			__reset_context(engines->ctx, engine);
	}
}
static void kill_context(struct i915_gem_context *ctx)
{
	bool ban = (!i915_gem_context_is_persistent(ctx) ||
		    !ctx->i915->params.enable_hangcheck);
	struct i915_gem_engines *pos, *next;

	spin_lock_irq(&ctx->stale.lock);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
	list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
		if (!i915_sw_fence_await(&pos->fence)) {
			list_del_init(&pos->link);
			continue;
		}

		spin_unlock_irq(&ctx->stale.lock);

		kill_engines(pos, ban);

		spin_lock_irq(&ctx->stale.lock);
		GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
		list_safe_reset_next(pos, next, link);
		list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */

		i915_sw_fence_complete(&pos->fence);
	}
	spin_unlock_irq(&ctx->stale.lock);
}
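/*
 * Note the pairing above: i915_sw_fence_await() takes an extra bias on the
 * fence so it cannot signal while we drop ctx->stale.lock and run
 * kill_engines(); the matching i915_sw_fence_complete() releases that bias.
 * A minimal sketch of the idiom (illustrative, not driver code):
 *
 *	if (i915_sw_fence_await(&fence)) {	// keep the fence pending
 *		do_work_that_must_precede_signaling();
 *		i915_sw_fence_complete(&fence);	// allow it to signal again
 *	}
 */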
static void engines_idle_release(struct i915_gem_context *ctx,
				 struct i915_gem_engines *engines)
{
	struct i915_gem_engines_iter it;
	struct intel_context *ce;

	INIT_LIST_HEAD(&engines->link);

	engines->ctx = i915_gem_context_get(ctx);

	for_each_gem_engine(ce, engines, it) {
		int err;

		/* serialises with execbuf */
		set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
		if (!intel_context_pin_if_active(ce))
			continue;

		/* Wait until context is finally scheduled out and retired */
		err = i915_sw_fence_await_active(&engines->fence,
						 &ce->active,
						 I915_ACTIVE_AWAIT_BARRIER);
		intel_context_unpin(ce);
		if (err)
			goto kill;
	}

	spin_lock_irq(&ctx->stale.lock);
	if (!i915_gem_context_is_closed(ctx))
		list_add_tail(&engines->link, &ctx->stale.engines);
	spin_unlock_irq(&ctx->stale.lock);

kill:
	if (list_empty(&engines->link)) /* raced, already closed */
		kill_engines(engines, true);

	i915_sw_fence_commit(&engines->fence);
}
static void set_closed_name(struct i915_gem_context *ctx)
{
	char *s;

	/* Replace '[]' with '<>' to indicate closed in debug prints */
	s = strrchr(ctx->name, '[');
	if (!s)
		return;
	*s = '<';

	s = strchr(s + 1, ']');
	if (s)
		*s = '>';
}
static void context_close(struct i915_gem_context *ctx)
{
	struct i915_address_space *vm;

	/* Flush any concurrent set_engines() */
	mutex_lock(&ctx->engines_mutex);
	engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
	i915_gem_context_set_closed(ctx);
	mutex_unlock(&ctx->engines_mutex);

	mutex_lock(&ctx->mutex);

	set_closed_name(ctx);

	vm = i915_gem_context_vm(ctx);
	if (vm)
		i915_vm_close(vm);

	ctx->file_priv = ERR_PTR(-EBADF);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);

	spin_lock(&ctx->i915->gem.contexts.lock);
	list_del(&ctx->link);
	spin_unlock(&ctx->i915->gem.contexts.lock);

	mutex_unlock(&ctx->mutex);

	/*
	 * If the user has disabled hangchecking, we can not be sure that
	 * the batches will ever complete after the context is closed,
	 * keeping the context and all resources pinned forever. So in this
	 * case we opt to forcibly kill off all remaining requests on
	 * context close.
	 */
	kill_context(ctx);

	i915_gem_context_put(ctx);
}
static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
{
	if (i915_gem_context_is_persistent(ctx) == state)
		return 0;

	if (state) {
		/*
		 * Only contexts that are short-lived [that will expire or be
		 * reset] are allowed to survive past termination. We require
		 * hangcheck to ensure that the persistent requests are healthy.
		 */
		if (!ctx->i915->params.enable_hangcheck)
			return -EINVAL;

		i915_gem_context_set_persistence(ctx);
	} else {
		/* To cancel a context we use "preempt-to-idle" */
		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
			return -ENODEV;

		/*
		 * If the cancel fails, we then need to reset, cleanly!
		 *
		 * If the per-engine reset fails, all hope is lost! We resort
		 * to a full GPU reset in that unlikely case, but realistically
		 * if the engine could not reset, the full reset does not fare
		 * much better. The damage has been done.
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * cleanup a hanging persistent context without causing
		 * collateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(&ctx->i915->gt))
			return -ENODEV;

		i915_gem_context_clear_persistence(ctx);
	}

	return 0;
}
static struct i915_gem_context *
__create_context(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct i915_gem_engines *e;
	int err;
	int i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->ref);
	ctx->i915 = i915;
	ctx->sched.priority = I915_PRIORITY_NORMAL;
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->link);

	spin_lock_init(&ctx->stale.lock);
	INIT_LIST_HEAD(&ctx->stale.engines);

	mutex_init(&ctx->engines_mutex);
	e = default_engines(ctx);
	if (IS_ERR(e)) {
		err = PTR_ERR(e);
		goto err_free;
	}
	RCU_INIT_POINTER(ctx->engines, e);

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
	mutex_init(&ctx->lut_mutex);

	/*
	 * NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP.
	 */
	ctx->remap_slice = ALL_L3_SLICES(i915);

	i915_gem_context_set_bannable(ctx);
	i915_gem_context_set_recoverable(ctx);
	__context_set_persistence(ctx, true /* cgroup hook? */);

	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;

	return ctx;

err_free:
	kfree(ctx);
	return ERR_PTR(err);
}
static inline struct i915_gem_engines *
__context_engines_await(const struct i915_gem_context *ctx,
			bool *user_engines)
{
	struct i915_gem_engines *engines;

	rcu_read_lock();
	do {
		engines = rcu_dereference(ctx->engines);
		GEM_BUG_ON(!engines);

		if (user_engines)
			*user_engines = i915_gem_context_user_engines(ctx);

		/* successful await => strong mb */
		if (unlikely(!i915_sw_fence_await(&engines->fence)))
			continue;

		if (likely(engines == rcu_access_pointer(ctx->engines)))
			break;

		i915_sw_fence_complete(&engines->fence);
	} while (1);
	rcu_read_unlock();

	return engines;
}
static void
context_apply_all(struct i915_gem_context *ctx,
		  void (*fn)(struct intel_context *ce, void *data),
		  void *data)
{
	struct i915_gem_engines_iter it;
	struct i915_gem_engines *e;
	struct intel_context *ce;

	e = __context_engines_await(ctx, NULL);
	for_each_gem_engine(ce, e, it)
		fn(ce, data);
	i915_sw_fence_complete(&e->fence);
}
static void __apply_ppgtt(struct intel_context *ce, void *vm)
{
	i915_vm_put(ce->vm);
	ce->vm = i915_vm_get(vm);
}

static struct i915_address_space *
__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
{
	struct i915_address_space *old;

	old = rcu_replace_pointer(ctx->vm,
				  i915_vm_open(vm),
				  lockdep_is_held(&ctx->mutex));
	GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));

	context_apply_all(ctx, __apply_ppgtt, vm);

	return old;
}

static void __assign_ppgtt(struct i915_gem_context *ctx,
			   struct i915_address_space *vm)
{
	if (vm == rcu_access_pointer(ctx->vm))
		return;

	vm = __set_ppgtt(ctx, vm);
	if (vm)
		i915_vm_close(vm);
}

static void __set_timeline(struct intel_timeline **dst,
			   struct intel_timeline *src)
{
	struct intel_timeline *old = *dst;

	*dst = src ? intel_timeline_get(src) : NULL;

	if (old)
		intel_timeline_put(old);
}

static void __apply_timeline(struct intel_context *ce, void *timeline)
{
	__set_timeline(&ce->timeline, timeline);
}

static void __assign_timeline(struct i915_gem_context *ctx,
			      struct intel_timeline *timeline)
{
	__set_timeline(&ctx->timeline, timeline);
	context_apply_all(ctx, __apply_timeline, timeline);
}
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
{
	struct i915_gem_context *ctx;

	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
	    !HAS_EXECLISTS(i915))
		return ERR_PTR(-EINVAL);

	ctx = __create_context(i915);
	if (IS_ERR(ctx))
		return ctx;

	if (HAS_FULL_PPGTT(i915)) {
		struct i915_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(&i915->gt);
		if (IS_ERR(ppgtt)) {
			drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
				PTR_ERR(ppgtt));
			context_close(ctx);
			return ERR_CAST(ppgtt);
		}

		mutex_lock(&ctx->mutex);
		__assign_ppgtt(ctx, &ppgtt->vm);
		mutex_unlock(&ctx->mutex);

		i915_vm_put(&ppgtt->vm);
	}

	if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
		struct intel_timeline *timeline;

		timeline = intel_timeline_create(&i915->gt);
		if (IS_ERR(timeline)) {
			context_close(ctx);
			return ERR_CAST(timeline);
		}

		__assign_timeline(ctx, timeline);
		intel_timeline_put(timeline);
	}

	trace_i915_context_create(ctx);

	return ctx;
}
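/*
 * From userspace, the path above is reached through the context-create ioctl.
 * A minimal, hedged sketch (illustrative; "fd" is an open DRM device handle
 * and error checking is elided):
 *
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	struct drm_i915_gem_context_create_ext arg = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &arg);
 *	// arg.ctx_id now names the new context for execbuf and setparam
 */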
static void init_contexts(struct i915_gem_contexts *gc)
{
	spin_lock_init(&gc->lock);
	INIT_LIST_HEAD(&gc->list);
}

void i915_gem_init__contexts(struct drm_i915_private *i915)
{
	init_contexts(&i915->gem.contexts);
}
static int gem_context_register(struct i915_gem_context *ctx,
				struct drm_i915_file_private *fpriv,
				u32 *id)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_address_space *vm;
	int ret;

	ctx->file_priv = fpriv;

	mutex_lock(&ctx->mutex);
	vm = i915_gem_context_vm(ctx);
	if (vm)
		WRITE_ONCE(vm->file, fpriv); /* XXX */
	mutex_unlock(&ctx->mutex);

	ctx->pid = get_task_pid(current, PIDTYPE_PID);
	snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
		 current->comm, pid_nr(ctx->pid));

	/* And finally expose ourselves to userspace via the idr */
	ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
	if (ret)
		goto err_pid;

	spin_lock(&i915->gem.contexts.lock);
	list_add_tail(&ctx->link, &i915->gem.contexts.list);
	spin_unlock(&i915->gem.contexts.lock);

	return 0;

err_pid:
	put_pid(fetch_and_zero(&ctx->pid));
	return ret;
}
int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int err;
	u32 id;

	xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);

	/* 0 reserved for invalid/unassigned ppgtt */
	xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);

	ctx = i915_gem_create_context(i915, 0);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err;
	}

	err = gem_context_register(ctx, file_priv, &id);
	if (err < 0)
		goto err_ctx;

	return 0;

err_ctx:
	context_close(ctx);
err:
	xa_destroy(&file_priv->vm_xa);
	xa_destroy(&file_priv->context_xa);
	return err;
}

void i915_gem_context_close(struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	unsigned long idx;

	xa_for_each(&file_priv->context_xa, idx, ctx)
		context_close(ctx);
	xa_destroy(&file_priv->context_xa);

	xa_for_each(&file_priv->vm_xa, idx, vm)
		i915_vm_put(vm);
	xa_destroy(&file_priv->vm_xa);
}
int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_vm_control *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_ppgtt *ppgtt;
	u32 id;
	int err;

	if (!HAS_FULL_PPGTT(i915))
		return -ENODEV;

	if (args->flags)
		return -EINVAL;

	ppgtt = i915_ppgtt_create(&i915->gt);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	ppgtt->vm.file = file_priv;

	if (args->extensions) {
		err = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   NULL, 0,
					   ppgtt);
		if (err)
			goto err_put;
	}

	err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
		       xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_put;

	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
	args->vm_id = id;
	return 0;

err_put:
	i915_vm_put(&ppgtt->vm);
	return err;
}

int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_vm_control *args = data;
	struct i915_address_space *vm;

	if (args->flags)
		return -EINVAL;

	if (args->extensions)
		return -EINVAL;

	vm = xa_erase(&file_priv->vm_xa, args->vm_id);
	if (!vm)
		return -ENOENT;

	i915_vm_put(vm);
	return 0;
}
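/*
 * Userspace pairs these two ioctls to manage address spaces explicitly.
 * A hedged sketch (illustrative only; error handling elided):
 *
 *	struct drm_i915_gem_vm_control vm = {};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &vm);
 *	// vm.vm_id can now be attached to a context via the
 *	// I915_CONTEXT_PARAM_VM setparam handled by set_ppgtt() below
 *	ioctl(fd, DRM_IOCTL_I915_GEM_VM_DESTROY, &vm);
 */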
struct context_barrier_task {
	struct i915_active base;
	void (*task)(void *data);
	void *data;
};

static void cb_retire(struct i915_active *base)
{
	struct context_barrier_task *cb = container_of(base, typeof(*cb), base);

	if (cb->task)
		cb->task(cb->data);

	i915_active_fini(&cb->base);
	kfree(cb);
}
I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
				intel_engine_mask_t engines,
				bool (*skip)(struct intel_context *ce, void *data),
				int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data),
				int (*emit)(struct i915_request *rq, void *data),
				void (*task)(void *data),
				void *data)
{
	struct context_barrier_task *cb;
	struct i915_gem_engines_iter it;
	struct i915_gem_engines *e;
	struct i915_gem_ww_ctx ww;
	struct intel_context *ce;
	int err = 0;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return -ENOMEM;

	i915_active_init(&cb->base, NULL, cb_retire, 0);
	err = i915_active_acquire(&cb->base);
	if (err) {
		kfree(cb);
		return err;
	}

	e = __context_engines_await(ctx, NULL);
	if (!e) {
		i915_active_release(&cb->base);
		return -ENOENT;
	}

	for_each_gem_engine(ce, e, it) {
		struct i915_request *rq;

		if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
				       ce->engine->mask)) {
			err = -ENXIO;
			break;
		}

		if (!(ce->engine->mask & engines))
			continue;

		if (skip && skip(ce, data))
			continue;

		i915_gem_ww_ctx_init(&ww, true);
retry:
		err = intel_context_pin_ww(ce, &ww);
		if (err)
			goto err;

		if (pin)
			err = pin(ce, &ww, data);
		if (err)
			goto err_unpin;

		rq = i915_request_create(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_unpin;
		}

		err = 0;
		if (emit)
			err = emit(rq, data);
		if (err == 0)
			err = i915_active_add_request(&cb->base, rq);

		i915_request_add(rq);
err_unpin:
		intel_context_unpin(ce);
err:
		if (err == -EDEADLK) {
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);

		if (err)
			break;
	}
	i915_sw_fence_complete(&e->fence);

	cb->task = err ? NULL : task; /* caller needs to unwind instead */
	cb->data = data;

	i915_active_release(&cb->base);

	return err;
}
static int get_ppgtt(struct drm_i915_file_private *file_priv,
		     struct i915_gem_context *ctx,
		     struct drm_i915_gem_context_param *args)
{
	struct i915_address_space *vm;
	int err;
	u32 id;

	if (!rcu_access_pointer(ctx->vm))
		return -ENODEV;

	rcu_read_lock();
	vm = context_get_vm_rcu(ctx);
	rcu_read_unlock();
	if (!vm)
		return -ENODEV;

	err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_put;

	GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
	args->value = id;
	args->size = 0;

err_put:
	i915_vm_put(vm);
	return err;
}
static void set_ppgtt_barrier(void *data)
{
	struct i915_address_space *old = data;

	if (GRAPHICS_VER(old->i915) < 8)
		gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));

	i915_vm_close(old);
}

static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data)
{
	struct i915_address_space *vm = ce->vm;

	if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915))
		/* ppGTT is not part of the legacy context image */
		return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);

	return 0;
}
static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
	struct i915_address_space *vm = rq->context->vm;
	struct intel_engine_cs *engine = rq->engine;
	u32 base = engine->mmio_base;
	u32 *cs;
	int i;

	if (i915_vm_is_4lvl(vm)) {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
		const dma_addr_t pd_daddr = px_dma(ppgtt->pd);

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		*cs++ = MI_LOAD_REGISTER_IMM(2);

		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
		*cs++ = upper_32_bits(pd_daddr);
		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
		*cs++ = lower_32_bits(pd_daddr);

		*cs++ = MI_NOOP;
		intel_ring_advance(rq, cs);
	} else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
		struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
		int err;

		/* Magic required to prevent forcewake errors! */
		err = engine->emit_flush(rq, EMIT_INVALIDATE);
		if (err)
			return err;

		cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
		for (i = GEN8_3LVL_PDPES; i--; ) {
			const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
			*cs++ = upper_32_bits(pd_daddr);
			*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
			*cs++ = lower_32_bits(pd_daddr);
		}
		*cs++ = MI_NOOP;
		intel_ring_advance(rq, cs);
	}

	return 0;
}
static bool skip_ppgtt_update(struct intel_context *ce, void *data)
{
	if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
		return !ce->state;
	else
		return !atomic_read(&ce->pin_count);
}
static int set_ppgtt(struct drm_i915_file_private *file_priv,
		     struct i915_gem_context *ctx,
		     struct drm_i915_gem_context_param *args)
{
	struct i915_address_space *vm, *old;
	int err;

	if (!rcu_access_pointer(ctx->vm))
		return -ENODEV;

	if (upper_32_bits(args->value))
		return -ENOENT;

	rcu_read_lock();
	vm = xa_load(&file_priv->vm_xa, args->value);
	if (vm && !kref_get_unless_zero(&vm->ref))
		vm = NULL;
	rcu_read_unlock();
	if (!vm)
		return -ENOENT;

	err = mutex_lock_interruptible(&ctx->mutex);
	if (err)
		goto out;

	if (i915_gem_context_is_closed(ctx)) {
		err = -ENOENT;
		goto unlock;
	}

	if (vm == rcu_access_pointer(ctx->vm))
		goto unlock;

	old = __set_ppgtt(ctx, vm);

	/* Teardown the existing obj:vma cache, it will have to be rebuilt. */
	lut_close(ctx);

	/*
	 * We need to flush any requests using the current ppgtt before
	 * we release it as the requests do not hold a reference themselves,
	 * only indirectly through the context.
	 */
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_ppgtt_update,
				   pin_ppgtt_update,
				   emit_ppgtt_update,
				   set_ppgtt_barrier,
				   old);
	if (err) {
		i915_vm_close(__set_ppgtt(ctx, old));
		lut_close(ctx); /* force a rebuild of the old obj:vma cache */
	}

unlock:
	mutex_unlock(&ctx->mutex);
out:
	i915_vm_put(vm);
	return err;
}
static int
i915_gem_user_to_context_sseu(struct intel_gt *gt,
			      const struct drm_i915_gem_context_param_sseu *user,
			      struct intel_sseu *context)
{
	const struct sseu_dev_info *device = &gt->info.sseu;
	struct drm_i915_private *i915 = gt->i915;

	/* No zeros in any field. */
	if (!user->slice_mask || !user->subslice_mask ||
	    !user->min_eus_per_subslice || !user->max_eus_per_subslice)
		return -EINVAL;

	/* Max > min. */
	if (user->max_eus_per_subslice < user->min_eus_per_subslice)
		return -EINVAL;

	/*
	 * Some future proofing on the types since the uAPI is wider than the
	 * current internal implementation.
	 */
	if (overflows_type(user->slice_mask, context->slice_mask) ||
	    overflows_type(user->subslice_mask, context->subslice_mask) ||
	    overflows_type(user->min_eus_per_subslice,
			   context->min_eus_per_subslice) ||
	    overflows_type(user->max_eus_per_subslice,
			   context->max_eus_per_subslice))
		return -EINVAL;

	/* Check validity against hardware. */
	if (user->slice_mask & ~device->slice_mask)
		return -EINVAL;

	if (user->subslice_mask & ~device->subslice_mask[0])
		return -EINVAL;

	if (user->max_eus_per_subslice > device->max_eus_per_subslice)
		return -EINVAL;

	context->slice_mask = user->slice_mask;
	context->subslice_mask = user->subslice_mask;
	context->min_eus_per_subslice = user->min_eus_per_subslice;
	context->max_eus_per_subslice = user->max_eus_per_subslice;

	/* Part specific restrictions. */
	if (GRAPHICS_VER(i915) == 11) {
		unsigned int hw_s = hweight8(device->slice_mask);
		unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
		unsigned int req_s = hweight8(context->slice_mask);
		unsigned int req_ss = hweight8(context->subslice_mask);

		/*
		 * Only full subslice enablement is possible if more than one
		 * slice is turned on.
		 */
		if (req_s > 1 && req_ss != hw_ss_per_s)
			return -EINVAL;

		/*
		 * If more than four (SScount bitfield limit) subslices are
		 * requested then the number has to be even.
		 */
		if (req_ss > 4 && (req_ss & 1))
			return -EINVAL;

		/*
		 * If only one slice is enabled and subslice count is below the
		 * device full enablement, it must be at most half of the all
		 * available subslices.
		 */
		if (req_s == 1 && req_ss < hw_ss_per_s &&
		    req_ss > (hw_ss_per_s / 2))
			return -EINVAL;

		/* ABI restriction - VME use case only. */

		/* All slices or one slice only. */
		if (req_s != 1 && req_s != hw_s)
			return -EINVAL;

		/*
		 * Half subslices or full enablement only when one slice is
		 * enabled.
		 */
		if (req_s == 1 &&
		    (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
			return -EINVAL;

		/* No EU configuration changes. */
		if ((user->min_eus_per_subslice !=
		     device->max_eus_per_subslice) ||
		    (user->max_eus_per_subslice !=
		     device->max_eus_per_subslice))
			return -EINVAL;
	}

	return 0;
}
static int set_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	struct intel_sseu sseu;
	unsigned long lookup;
	int ret;

	if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (GRAPHICS_VER(i915) != 11)
		return -ENODEV;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/* Only render engine supports RPCS configuration. */
	if (ce->engine->class != RENDER_CLASS) {
		ret = -ENODEV;
		goto out_ce;
	}

	ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
	if (ret)
		goto out_ce;

	ret = intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_ce;

	args->size = sizeof(user_sseu);

out_ce:
	intel_context_put(ce);
	return ret;
}
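/*
 * Userspace reaches set_sseu() through context-setparam. A hedged sketch of
 * a gen11 VME-style configuration (illustrative only; the mask values depend
 * on the part and must satisfy the rules checked above):
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = { .engine_class = I915_ENGINE_CLASS_RENDER },
 *		.slice_mask = 0x1,		// one slice
 *		.subslice_mask = 0xf,		// half/full subslice enablement
 *		.min_eus_per_subslice = 8,	// no EU changes permitted
 *		.max_eus_per_subslice = 8,
 *	};
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (uintptr_t)&sseu,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 */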
struct set_engines {
	struct i915_gem_context *ctx;
	struct i915_gem_engines *engines;
};

static int
set_engines__load_balance(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_load_balance __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_engines *set = data;
	struct drm_i915_private *i915 = set->ctx->i915;
	struct intel_engine_cs *stack[16];
	struct intel_engine_cs **siblings;
	struct intel_context *ce;
	u16 num_siblings, idx;
	unsigned int n;
	int err;

	if (!HAS_EXECLISTS(i915))
		return -ENODEV;

	if (intel_uc_uses_guc_submission(&i915->gt.uc))
		return -ENODEV; /* not implemented yet */

	if (get_user(idx, &ext->engine_index))
		return -EFAULT;

	if (idx >= set->engines->num_engines) {
		drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
			idx, set->engines->num_engines);
		return -EINVAL;
	}

	idx = array_index_nospec(idx, set->engines->num_engines);
	if (set->engines->engines[idx]) {
		drm_dbg(&i915->drm,
			"Invalid placement[%d], already occupied\n", idx);
		return -EEXIST;
	}

	if (get_user(num_siblings, &ext->num_siblings))
		return -EFAULT;

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	err = check_user_mbz(&ext->mbz64);
	if (err)
		return err;

	siblings = stack;
	if (num_siblings > ARRAY_SIZE(stack)) {
		siblings = kmalloc_array(num_siblings,
					 sizeof(*siblings),
					 GFP_KERNEL);
		if (!siblings)
			return -ENOMEM;
	}

	for (n = 0; n < num_siblings; n++) {
		struct i915_engine_class_instance ci;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
			err = -EFAULT;
			goto out_siblings;
		}

		siblings[n] = intel_engine_lookup_user(i915,
						       ci.engine_class,
						       ci.engine_instance);
		if (!siblings[n]) {
			drm_dbg(&i915->drm,
				"Invalid sibling[%d]: { class:%d, inst:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			err = -EINVAL;
			goto out_siblings;
		}
	}

	ce = intel_execlists_create_virtual(siblings, n);
	if (IS_ERR(ce)) {
		err = PTR_ERR(ce);
		goto out_siblings;
	}

	intel_context_set_gem(ce, set->ctx);

	if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
		intel_context_put(ce);
		err = -EEXIST;
		goto out_siblings;
	}

out_siblings:
	if (siblings != stack)
		kfree(siblings);

	return err;
}
static int
set_engines__bond(struct i915_user_extension __user *base, void *data)
{
	struct i915_context_engines_bond __user *ext =
		container_of_user(base, typeof(*ext), base);
	const struct set_engines *set = data;
	struct drm_i915_private *i915 = set->ctx->i915;
	struct i915_engine_class_instance ci;
	struct intel_engine_cs *virtual;
	struct intel_engine_cs *master;
	u16 idx, num_bonds;
	int err, n;

	if (get_user(idx, &ext->virtual_index))
		return -EFAULT;

	if (idx >= set->engines->num_engines) {
		drm_dbg(&i915->drm,
			"Invalid index for virtual engine: %d >= %d\n",
			idx, set->engines->num_engines);
		return -EINVAL;
	}

	idx = array_index_nospec(idx, set->engines->num_engines);
	if (!set->engines->engines[idx]) {
		drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
		return -EINVAL;
	}
	virtual = set->engines->engines[idx]->engine;

	err = check_user_mbz(&ext->flags);
	if (err)
		return err;

	for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
		err = check_user_mbz(&ext->mbz64[n]);
		if (err)
			return err;
	}

	if (copy_from_user(&ci, &ext->master, sizeof(ci)))
		return -EFAULT;

	master = intel_engine_lookup_user(i915,
					  ci.engine_class, ci.engine_instance);
	if (!master) {
		drm_dbg(&i915->drm,
			"Unrecognised master engine: { class:%u, instance:%u }\n",
			ci.engine_class, ci.engine_instance);
		return -EINVAL;
	}

	if (get_user(num_bonds, &ext->num_bonds))
		return -EFAULT;

	for (n = 0; n < num_bonds; n++) {
		struct intel_engine_cs *bond;

		if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
			return -EFAULT;

		bond = intel_engine_lookup_user(i915,
						ci.engine_class,
						ci.engine_instance);
		if (!bond) {
			drm_dbg(&i915->drm,
				"Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
				n, ci.engine_class, ci.engine_instance);
			return -EINVAL;
		}

		/*
		 * A non-virtual engine has no siblings to choose between, and
		 * a submit fence will always be directed to the one engine.
		 */
		if (intel_engine_is_virtual(virtual)) {
			err = intel_virtual_engine_attach_bond(virtual,
							       master,
							       bond);
			if (err)
				return err;
		}
	}

	return 0;
}
static const i915_user_extension_fn set_engines__extensions[] = {
	[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
	[I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
};
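/*
 * A hedged userspace sketch of the load-balance extension (illustrative;
 * uses the I915_DEFINE_* helper macros from the uAPI header and assumes two
 * VCS engines to balance across, with the virtual engine placed in slot 0):
 *
 *	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balance, 2) = {
 *		.base = { .name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE },
 *		.engine_index = 0,
 *		.num_siblings = 2,
 *		.engines = {
 *			{ I915_ENGINE_CLASS_VIDEO, 0 },
 *			{ I915_ENGINE_CLASS_VIDEO, 1 },
 *		},
 *	};
 *	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
 *		.extensions = (uintptr_t)&balance,
 *		.engines = { { I915_ENGINE_CLASS_INVALID,
 *			       I915_ENGINE_CLASS_INVALID_NONE } },
 *	};
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *		.size = sizeof(engines),
 *		.value = (uintptr_t)&engines,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 */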
static int
set_engines(struct i915_gem_context *ctx,
	    const struct drm_i915_gem_context_param *args)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct i915_context_param_engines __user *user =
		u64_to_user_ptr(args->value);
	struct set_engines set = { .ctx = ctx };
	unsigned int num_engines, n;
	u64 extensions;
	int err;

	if (!args->size) { /* switch back to legacy user_ring_map */
		if (!i915_gem_context_user_engines(ctx))
			return 0;

		set.engines = default_engines(ctx);
		if (IS_ERR(set.engines))
			return PTR_ERR(set.engines);

		goto replace;
	}

	BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
	if (args->size < sizeof(*user) ||
	    !IS_ALIGNED(args->size, sizeof(*user->engines))) {
		drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
			args->size);
		return -EINVAL;
	}

	/*
	 * Note that I915_EXEC_RING_MASK limits execbuf to only using the
	 * first 64 engines defined here.
	 */
	num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
	set.engines = alloc_engines(num_engines);
	if (!set.engines)
		return -ENOMEM;

	for (n = 0; n < num_engines; n++) {
		struct i915_engine_class_instance ci;
		struct intel_engine_cs *engine;
		struct intel_context *ce;

		if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
			__free_engines(set.engines, n);
			return -EFAULT;
		}

		if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
		    ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
			set.engines->engines[n] = NULL;
			continue;
		}

		engine = intel_engine_lookup_user(ctx->i915,
						  ci.engine_class,
						  ci.engine_instance);
		if (!engine) {
			drm_dbg(&i915->drm,
				"Invalid engine[%d]: { class:%d, instance:%d }\n",
				n, ci.engine_class, ci.engine_instance);
			__free_engines(set.engines, n);
			return -ENOENT;
		}

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			__free_engines(set.engines, n);
			return PTR_ERR(ce);
		}

		intel_context_set_gem(ce, ctx);

		set.engines->engines[n] = ce;
	}
	set.engines->num_engines = num_engines;

	err = -EFAULT;
	if (!get_user(extensions, &user->extensions))
		err = i915_user_extensions(u64_to_user_ptr(extensions),
					   set_engines__extensions,
					   ARRAY_SIZE(set_engines__extensions),
					   &set);
	if (err) {
		free_engines(set.engines);
		return err;
	}

replace:
	mutex_lock(&ctx->engines_mutex);
	if (i915_gem_context_is_closed(ctx)) {
		mutex_unlock(&ctx->engines_mutex);
		free_engines(set.engines);
		return -ENOENT;
	}
	if (args->size)
		i915_gem_context_set_user_engines(ctx);
	else
		i915_gem_context_clear_user_engines(ctx);
	set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
	mutex_unlock(&ctx->engines_mutex);

	/* Keep track of old engine sets for kill_context() */
	engines_idle_release(ctx, set.engines);

	return 0;
}
static int
get_engines(struct i915_gem_context *ctx,
	    struct drm_i915_gem_context_param *args)
{
	struct i915_context_param_engines __user *user;
	struct i915_gem_engines *e;
	size_t n, count, size;
	bool user_engines;
	int err = 0;

	e = __context_engines_await(ctx, &user_engines);
	if (!e)
		return -ENOENT;

	if (!user_engines) {
		i915_sw_fence_complete(&e->fence);
		args->size = 0;
		return 0;
	}

	count = e->num_engines;

	/* Be paranoid in case we have an impedance mismatch */
	if (!check_struct_size(user, engines, count, &size)) {
		err = -EINVAL;
		goto err_free;
	}
	if (overflows_type(size, args->size)) {
		err = -EINVAL;
		goto err_free;
	}

	if (!args->size) {
		args->size = size;
		goto err_free;
	}

	if (args->size < size) {
		err = -EINVAL;
		goto err_free;
	}

	user = u64_to_user_ptr(args->value);
	if (put_user(0, &user->extensions)) {
		err = -EFAULT;
		goto err_free;
	}

	for (n = 0; n < count; n++) {
		struct i915_engine_class_instance ci = {
			.engine_class = I915_ENGINE_CLASS_INVALID,
			.engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
		};

		if (e->engines[n]) {
			ci.engine_class = e->engines[n]->engine->uabi_class;
			ci.engine_instance = e->engines[n]->engine->uabi_instance;
		}

		if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
			err = -EFAULT;
			goto err_free;
		}
	}

	args->size = size;

err_free:
	i915_sw_fence_complete(&e->fence);
	return err;
}
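/*
 * Userspace typically calls getparam twice for this parameter: once with
 * size = 0 to learn the required buffer size, then again with a buffer.
 * A hedged sketch (illustrative; error handling elided):
 *
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_ENGINES,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg); // arg.size set
 *	arg.value = (uintptr_t)malloc(arg.size);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg); // array filled
 */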
static int
set_persistence(struct i915_gem_context *ctx,
		const struct drm_i915_gem_context_param *args)
{
	if (args->size)
		return -EINVAL;

	return __context_set_persistence(ctx, args->value);
}

static void __apply_priority(struct intel_context *ce, void *arg)
{
	struct i915_gem_context *ctx = arg;

	if (!intel_engine_has_timeslices(ce->engine))
		return;

	if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
		intel_context_set_use_semaphores(ce);
	else
		intel_context_clear_use_semaphores(ce);
}
static int set_priority(struct i915_gem_context *ctx,
			const struct drm_i915_gem_context_param *args)
{
	s64 priority = args->value;

	if (args->size)
		return -EINVAL;

	if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
		return -ENODEV;

	if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
	    priority < I915_CONTEXT_MIN_USER_PRIORITY)
		return -EINVAL;

	if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
	    !capable(CAP_SYS_NICE))
		return -EPERM;

	ctx->sched.priority = priority;
	context_apply_all(ctx, __apply_priority, ctx);

	return 0;
}
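/*
 * For example, a compositor might raise its context priority (values above
 * the default require CAP_SYS_NICE). Hedged sketch:
 *
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = 512,	// within [MIN, MAX]_USER_PRIORITY
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 */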
static int ctx_setparam(struct drm_i915_file_private *fpriv,
			struct i915_gem_context *ctx,
			struct drm_i915_gem_context_param *args)
{
	int ret = 0;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else
			i915_gem_context_clear_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_recoverable(ctx);
		else
			i915_gem_context_clear_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		ret = set_priority(ctx, args);
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = set_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = set_ppgtt(fpriv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = set_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		ret = set_persistence(ctx, args);
		break;

	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_RINGSIZE:
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
struct create_ext {
	struct i915_gem_context *ctx;
	struct drm_i915_file_private *fpriv;
};

static int create_setparam(struct i915_user_extension __user *ext, void *data)
{
	struct drm_i915_gem_context_create_ext_setparam local;
	const struct create_ext *arg = data;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	if (local.param.ctx_id)
		return -EINVAL;

	return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
}

static int invalid_ext(struct i915_user_extension __user *ext, void *data)
{
	return -EINVAL;
}

static const i915_user_extension_fn create_extensions[] = {
	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
	[I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
};

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;
	u32 id;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(&i915->gt);
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
		drm_dbg(&i915->drm,
			"client %s[%d] banned from creating ctx\n",
			current->comm, task_pid_nr(current));
		return -EIO;
	}

	ext_data.ctx = i915_gem_create_context(i915, args->flags);
	if (IS_ERR(ext_data.ctx))
		return PTR_ERR(ext_data.ctx);

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   create_extensions,
					   ARRAY_SIZE(create_extensions),
					   &ext_data);
		if (ret)
			goto err_ctx;
	}

	ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
	if (ret < 0)
		goto err_ctx;

	args->ctx_id = id;
	drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);

	return 0;

err_ctx:
	context_close(ext_data.ctx);
	return ret;
}
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	if (args->pad != 0)
		return -EINVAL;

	if (!args->ctx_id)
		return -ENOENT;

	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	context_close(ctx);
	return 0;
}
static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	switch (args->param) {
	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		rcu_read_lock();
		if (rcu_access_pointer(ctx->vm))
			args->value = rcu_dereference(ctx->vm)->total;
		else
			args->value = to_i915(dev)->ggtt.vm.total;
		rcu_read_unlock();
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = get_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_RINGSIZE:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = ctx_setparam(file_priv, ctx, args);

	i915_gem_context_put(ctx);
	return ret;
}
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
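/*
 * Userspace (e.g. GL robustness extensions) polls these counters to decide
 * whether its context was involved in a hang. Hedged sketch:
 *
 *	struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats);
 *	// stats.batch_active: hangs where this context was guilty
 *	// stats.batch_pending: hangs where it had innocent pending work
 */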
/* GEM context-engines iterator: for_each_gem_engine() */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
{
	const struct i915_gem_engines *e = it->engines;
	struct intel_context *ctx;

	do {
		if (it->idx >= e->num_engines)
			return NULL;

		ctx = e->engines[it->idx++];
	} while (!ctx);

	return ctx;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif
static void i915_global_gem_context_shrink(void)
{
	kmem_cache_shrink(global.slab_luts);
}

static void i915_global_gem_context_exit(void)
{
	kmem_cache_destroy(global.slab_luts);
}

static struct i915_global_gem_context global = { {
	.shrink = i915_global_gem_context_shrink,
	.exit = i915_global_gem_context_exit,
} };

int __init i915_global_gem_context_init(void)
{
	global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!global.slab_luts)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}