/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */
/*
 * This file implements HW context support. On gen5+ a HW context consists of
 * an opaque GPU object which is referenced at times of context saves and
 * restores. With RC6 enabled, the context is also referenced as the GPU enters
 * and exits RC6 (the GPU has its own internal power context, except on gen5).
 * Though something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU
 * state. The default context only exists to give the GPU some offset to load
 * as the current context, to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created at the request of a GPU client. These
 * contexts store GPU state, and thus allow GPU clients to not re-emit state
 * (and potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount  pincount  active
 * S0: initial state                          0         0        0
 * S1: context created                        1         0        0
 * S2: context is currently running           2         1        X
 * S3: GPU referenced, but not current        2         0        1
 * S4: context is current, but destroyed      1         1        0
 * S5: like S3, but destroyed                 1         0        1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and
 *  is on the active list waiting for the next context switch to occur. Until
 *  this happens, the object must remain at the same gtt offset. It is
 *  therefore possible to destroy a context while it is still active.
 *
 * A minimal userspace view of this life cycle is sketched below.
 */
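/*
 * Illustrative sketch only (not part of the driver): the userspace side of
 * the life cycle above, assuming an open i915 DRM fd and libdrm's drmIoctl(),
 * with all error handling elided. Execbufs referencing the handle drive the
 * S1->S2->S3 transitions:
 *
 *	struct drm_i915_gem_context_create create = {};
 *	struct drm_i915_gem_context_destroy destroy = {};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);   // S0->S1
 *	// submit work: drm_i915_gem_execbuffer2.rsvd1 = create.ctx_id
 *	destroy.ctx_id = create.ctx_id;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy); // ->S5->S0
 */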
#include <linux/log2.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)
static void lut_close(struct i915_gem_context *ctx)
{
	struct i915_lut_handle *lut, *ln;
	struct radix_tree_iter iter;
	void __rcu **slot;

	list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
		list_del(&lut->obj_link);
		kmem_cache_free(ctx->i915->luts, lut);
	}

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);

		radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
		__i915_gem_object_release_unless_active(vma->obj);
	}
	rcu_read_unlock();
}
static void i915_gem_context_free(struct i915_gem_context *ctx)
{
	int i;

	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

	i915_ppgtt_put(ctx->ppgtt);

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		struct intel_context *ce = &ctx->engine[i];

		if (!ce->state)
			continue;

		WARN_ON(ce->pin_count);
		if (ce->ring)
			intel_ring_free(ce->ring);

		__i915_gem_object_release_unless_active(ce->state->obj);
	}

	kfree(ctx->name);
	put_pid(ctx->pid);

	list_del(&ctx->link);

	ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
	kfree_rcu(ctx, rcu);
}
static void contexts_free(struct drm_i915_private *i915)
{
	struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
	struct i915_gem_context *ctx, *cn;

	lockdep_assert_held(&i915->drm.struct_mutex);

	llist_for_each_entry_safe(ctx, cn, freed, free_link)
		i915_gem_context_free(ctx);
}
static void contexts_free_first(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct llist_node *freed;

	lockdep_assert_held(&i915->drm.struct_mutex);

	freed = llist_del_first(&i915->contexts.free_list);
	if (!freed)
		return;

	ctx = container_of(freed, typeof(*ctx), free_link);
	i915_gem_context_free(ctx);
}
static void contexts_free_worker(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, typeof(*i915), contexts.free_work);

	mutex_lock(&i915->drm.struct_mutex);
	contexts_free(i915);
	mutex_unlock(&i915->drm.struct_mutex);
}
void i915_gem_context_release(struct kref *ref)
{
	struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
	struct drm_i915_private *i915 = ctx->i915;

	trace_i915_context_free(ctx);
	if (llist_add(&ctx->free_link, &i915->contexts.free_list))
		queue_work(i915->wq, &i915->contexts.free_work);
}
static void context_close(struct i915_gem_context *ctx)
{
	i915_gem_context_set_closed(ctx);

	/*
	 * The LUT uses the VMA as a backpointer to unref the object,
	 * so we need to clear the LUT before we close all the VMA (inside
	 * the ppgtt).
	 */
	lut_close(ctx);
	if (ctx->ppgtt)
		i915_ppgtt_close(&ctx->ppgtt->base);

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_put(ctx);
}
static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
	int ret;

	ret = ida_simple_get(&dev_priv->contexts.hw_ida,
			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
	if (ret < 0) {
		/* Contexts are only released when no longer active.
		 * Flush any pending retires to hopefully release some
		 * stale contexts and try again.
		 */
		i915_gem_retire_requests(dev_priv);
		ret = ida_simple_get(&dev_priv->contexts.hw_ida,
				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}
static u32 default_desc_template(const struct drm_i915_private *i915,
				 const struct i915_hw_ppgtt *ppgtt)
{
	u32 address_mode;
	u32 desc;

	desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;

	address_mode = INTEL_LEGACY_32B_CONTEXT;
	if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
		address_mode = INTEL_LEGACY_64B_CONTEXT;
	desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;

	if (IS_GEN8(i915))
		desc |= GEN8_CTX_L3LLC_COHERENT;

	/* TODO: WaDisableLiteRestore when we start using semaphore
	 * signalling between Command Streamers
	 * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
	 */

	return desc;
}
static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
		    struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (ctx == NULL)
		return ERR_PTR(-ENOMEM);

	ret = assign_hw_id(dev_priv, &ctx->hw_id);
	if (ret) {
		kfree(ctx);
		return ERR_PTR(ret);
	}

	kref_init(&ctx->ref);
	list_add_tail(&ctx->link, &dev_priv->contexts.list);
	ctx->i915 = dev_priv;
	ctx->priority = I915_PRIORITY_NORMAL;

	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
	INIT_LIST_HEAD(&ctx->handles_list);

	/* Default context will never have a file_priv */
	ret = DEFAULT_CONTEXT_HANDLE;
	if (file_priv) {
		ret = idr_alloc(&file_priv->context_idr, ctx,
				DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
		if (ret < 0)
			goto err_lut;
	}
	ctx->user_handle = ret;

	ctx->file_priv = file_priv;
	if (file_priv) {
		ctx->pid = get_task_pid(current, PIDTYPE_PID);
		ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
				      current->comm,
				      pid_nr(ctx->pid),
				      ctx->user_handle);
		if (!ctx->name) {
			ret = -ENOMEM;
			goto err_pid;
		}
	}

	/* NB: Mark all slices as needing a remap so that when the context first
	 * loads it will restore whatever remap state already exists. If there
	 * is no remap info, it will be a NOP. */
	ctx->remap_slice = ALL_L3_SLICES(dev_priv);

	i915_gem_context_set_bannable(ctx);
	ctx->ring_size = 4 * PAGE_SIZE;
	ctx->desc_template =
		default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);

	/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
	 * present or not in use we still need a small bias as ring wraparound
	 * at offset 0 sometimes hangs. No idea why.
	 */
	if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading)
		ctx->ggtt_offset_bias = GUC_WOPCM_TOP;
	else
		ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;

	return ctx;

err_pid:
	put_pid(ctx->pid);
	idr_remove(&file_priv->context_idr, ctx->user_handle);
err_lut:
	context_close(ctx);
	return ERR_PTR(ret);
}
static void __destroy_hw_context(struct i915_gem_context *ctx,
				 struct drm_i915_file_private *file_priv)
{
	idr_remove(&file_priv->context_idr, ctx->user_handle);
	context_close(ctx);
}
/**
 * The default context needs to exist per ring that uses contexts. It stores
 * the context state of the GPU for applications that don't utilize HW
 * contexts, as well as an idle case.
 */
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv,
			struct drm_i915_file_private *file_priv)
{
	struct i915_gem_context *ctx;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* Reap the most stale context */
	contexts_free_first(dev_priv);

	ctx = __create_hw_context(dev_priv, file_priv);
	if (IS_ERR(ctx))
		return ctx;

	if (USES_FULL_PPGTT(dev_priv)) {
		struct i915_hw_ppgtt *ppgtt;

		ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name);
		if (IS_ERR(ppgtt)) {
			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
					 PTR_ERR(ppgtt));
			__destroy_hw_context(ctx, file_priv);
			return ERR_CAST(ppgtt);
		}

		ctx->ppgtt = ppgtt;
		ctx->desc_template = default_desc_template(dev_priv, ppgtt);
	}

	trace_i915_context_create(ctx);

	return ctx;
}
/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device *
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
	struct i915_gem_context *ctx;
	int ret;

	if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
		return ERR_PTR(-ENODEV);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	ctx = __create_hw_context(to_i915(dev), NULL);
	if (IS_ERR(ctx))
		goto out;

	ctx->file_priv = ERR_PTR(-EBADF);
	i915_gem_context_set_closed(ctx); /* not user accessible */
	i915_gem_context_clear_bannable(ctx);
	i915_gem_context_set_force_single_submission(ctx);
	if (!i915_modparams.enable_guc_submission)
		ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
	mutex_unlock(&dev->struct_mutex);
	return ctx;
}
static struct i915_gem_context *
create_kernel_context(struct drm_i915_private *i915, int prio)
{
	struct i915_gem_context *ctx;

	ctx = i915_gem_create_context(i915, NULL);
	if (IS_ERR(ctx))
		return ctx;

	i915_gem_context_clear_bannable(ctx);
	ctx->priority = prio;
	ctx->ring_size = PAGE_SIZE;

	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	return ctx;
}
static void
destroy_kernel_context(struct i915_gem_context **ctxp)
{
	struct i915_gem_context *ctx;

	/* Keep the context ref so that we can free it immediately ourselves */
	ctx = i915_gem_context_get(fetch_and_zero(ctxp));
	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

	context_close(ctx);
	i915_gem_context_free(ctx);
}
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
	struct i915_gem_context *ctx;
	int err;

	GEM_BUG_ON(dev_priv->kernel_context);

	INIT_LIST_HEAD(&dev_priv->contexts.list);
	INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
	init_llist_head(&dev_priv->contexts.free_list);

	if (intel_vgpu_active(dev_priv) &&
	    HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		if (!i915_modparams.enable_execlists) {
			DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
			return -EINVAL;
		}
	}

	/* Using the simple ida interface, the max is limited by sizeof(int) */
	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
	ida_init(&dev_priv->contexts.hw_ida);

	/* lowest priority; idle task */
	ctx = create_kernel_context(dev_priv, I915_PRIORITY_MIN);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default global context\n");
		err = PTR_ERR(ctx);
		goto err;
	}
	/*
	 * For easy recognisability, we want the kernel context to be 0 and
	 * then all user contexts will have non-zero hw_id.
	 */
	GEM_BUG_ON(ctx->hw_id);
	dev_priv->kernel_context = ctx;

	/* highest priority; preempting task */
	ctx = create_kernel_context(dev_priv, INT_MAX);
	if (IS_ERR(ctx)) {
		DRM_ERROR("Failed to create default preempt context\n");
		err = PTR_ERR(ctx);
		goto err_kernel_context;
	}
	dev_priv->preempt_context = ctx;

	DRM_DEBUG_DRIVER("%s context support initialized\n",
			 dev_priv->engine[RCS]->context_size ? "logical" :
			 "fake");
	return 0;

err_kernel_context:
	destroy_kernel_context(&dev_priv->kernel_context);
err:
	return err;
}
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id) {
		engine->legacy_active_context = NULL;

		if (!engine->last_retired_context)
			continue;

		engine->context_unpin(engine, engine->last_retired_context);
		engine->last_retired_context = NULL;
	}

	/* Force the GPU state to be restored on enabling */
	if (!i915_modparams.enable_execlists) {
		struct i915_gem_context *ctx;

		list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
			if (!i915_gem_context_is_default(ctx))
				continue;

			for_each_engine(engine, dev_priv, id)
				ctx->engine[engine->id].initialised = false;

			ctx->remap_slice = ALL_L3_SLICES(dev_priv);
		}

		for_each_engine(engine, dev_priv, id) {
			struct intel_context *kce =
				&dev_priv->kernel_context->engine[engine->id];

			kce->initialised = true;
		}
	}
}
void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
	lockdep_assert_held(&i915->drm.struct_mutex);

	destroy_kernel_context(&i915->preempt_context);
	destroy_kernel_context(&i915->kernel_context);

	/* Must free all deferred contexts (via flush_workqueue) first */
	ida_destroy(&i915->contexts.hw_ida);
}
static int context_idr_cleanup(int id, void *p, void *data)
{
	struct i915_gem_context *ctx = p;

	context_close(ctx);
	return 0;
}
int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	idr_init(&file_priv->context_idr);

	mutex_lock(&i915->drm.struct_mutex);
	ctx = i915_gem_create_context(i915, file_priv);
	mutex_unlock(&i915->drm.struct_mutex);
	if (IS_ERR(ctx)) {
		idr_destroy(&file_priv->context_idr);
		return PTR_ERR(ctx);
	}

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	return 0;
}
void i915_gem_context_close(struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);

	idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
	idr_destroy(&file_priv->context_idr);
}
static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 flags)
{
	struct drm_i915_private *dev_priv = req->i915;
	struct intel_engine_cs *engine = req->engine;
	enum intel_engine_id id;
	const int num_rings =
		/* Use an extended w/a on gen7 if signalling from other rings */
		(i915_modparams.semaphores && INTEL_GEN(dev_priv) == 7) ?
		INTEL_INFO(dev_priv)->num_rings - 1 :
		0;
	int len;
	u32 *cs;

	flags |= MI_MM_SPACE_GTT;
	if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
		/* These flags are for resource streamer on HSW+ */
		flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
	else
		flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;

	len = 4;
	if (INTEL_GEN(dev_priv) >= 7)
		len += 2 + (num_rings ? 4*num_rings + 6 : 0);

	cs = intel_ring_begin(req, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (INTEL_GEN(dev_priv) >= 7) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_rings) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
			for_each_engine(signaller, dev_priv, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
					GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (INTEL_GEN(dev_priv) >= 7) {
		if (num_rings) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = {}; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
			for_each_engine(signaller, dev_priv, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
					GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = i915_ggtt_offset(engine->scratch);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	}

	intel_ring_advance(req, cs);

	return 0;
}
static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
	u32 *cs, *remap_info = req->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
				   struct intel_engine_cs *engine,
				   struct i915_gem_context *to)
{
	if (to->remap_slice)
		return false;

	if (!to->engine[RCS].initialised)
		return false;

	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	return to == engine->legacy_active_context;
}
static bool
needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine)
{
	struct i915_gem_context *from = engine->legacy_active_context;

	if (!ppgtt)
		return false;

	/* Always load the ppgtt on first use */
	if (!from)
		return true;

	/* Same context without new entries, skip */
	if ((!from->ppgtt || from->ppgtt == ppgtt) &&
	    !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
		return false;

	if (engine->id != RCS)
		return true;

	if (INTEL_GEN(engine->i915) < 8)
		return true;

	return false;
}
static bool
needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
		   struct i915_gem_context *to,
		   u32 hw_flags)
{
	if (!ppgtt)
		return false;

	if (!IS_GEN8(to->i915))
		return false;

	if (hw_flags & MI_RESTORE_INHIBIT)
		return true;

	return false;
}
static int do_rcs_switch(struct drm_i915_gem_request *req)
{
	struct i915_gem_context *to = req->ctx;
	struct intel_engine_cs *engine = req->engine;
	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
	struct i915_gem_context *from = engine->legacy_active_context;
	u32 hw_flags;
	int ret, i;

	GEM_BUG_ON(engine->id != RCS);

	if (skip_rcs_switch(ppgtt, engine, to))
		return 0;

	if (needs_pd_load_pre(ppgtt, engine)) {
		/* Older GENs and non render rings still want the load first,
		 * "PP_DCLV followed by PP_DIR_BASE register through Load
		 * Register Immediate commands in Ring Buffer before submitting
		 * a context." */
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		if (ret)
			return ret;
	}

	if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
		/* NB: If we inhibit the restore, the context is not allowed to
		 * die because future work may end up depending on valid address
		 * space. This means we must enforce that a page table load
		 * occur when this occurs. */
		hw_flags = MI_RESTORE_INHIBIT;
	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
		hw_flags = MI_FORCE_RESTORE;
	else
		hw_flags = 0;

	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
		ret = mi_set_context(req, hw_flags);
		if (ret)
			return ret;

		engine->legacy_active_context = to;
	}

	/* GEN8 does *not* require an explicit reload if the PDPs have been
	 * setup, and we do not wish to move them.
	 */
	if (needs_pd_load_post(ppgtt, to, hw_flags)) {
		trace_switch_mm(engine, to);
		ret = ppgtt->switch_mm(ppgtt, req);
		/* The hardware context switch is emitted, but we haven't
		 * actually changed the state - so it's probably safe to bail
		 * here. Still, let the user know something dangerous has
		 * happened.
		 */
		if (ret)
			return ret;
	}

	if (ppgtt)
		ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(to->remap_slice & (1 << i)))
			continue;

		ret = remap_l3(req, i);
		if (ret)
			return ret;

		to->remap_slice &= ~(1 << i);
	}

	if (!to->engine[RCS].initialised) {
		if (engine->init_context) {
			ret = engine->init_context(req);
			if (ret)
				return ret;
		}
		to->engine[RCS].initialised = true;
	}

	return 0;
}
/**
 * i915_switch_context() - perform a GPU context switch.
 * @req: request for which we'll execute the context switch
 *
 * The context life cycle is simple. The context refcount is incremented and
 * decremented by 1 on create and destroy. If the context is in use by the GPU,
 * it will have a refcount > 1. This allows us to destroy the context abstract
 * object while letting the normal object tracking destroy the backing BO.
 *
 * This function should not be used in execlists mode. Instead the context is
 * switched by writing to the ELSP and requests keep a reference to their
 * context.
 */
int i915_switch_context(struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;

	lockdep_assert_held(&req->i915->drm.struct_mutex);
	if (i915_modparams.enable_execlists)
		return 0;

	if (!req->ctx->engine[engine->id].state) {
		struct i915_gem_context *to = req->ctx;
		struct i915_hw_ppgtt *ppgtt =
			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;

		if (needs_pd_load_pre(ppgtt, engine)) {
			int ret;

			trace_switch_mm(engine, to);
			ret = ppgtt->switch_mm(ppgtt, req);
			if (ret)
				return ret;

			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
		}

		engine->legacy_active_context = to;
		return 0;
	}

	return do_rcs_switch(req);
}
static bool engine_has_kernel_context(struct intel_engine_cs *engine)
{
	struct i915_gem_timeline *timeline;

	list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
		struct intel_timeline *tl;

		if (timeline == &engine->i915->gt.global_timeline)
			continue;

		tl = &timeline->engine[engine->id];
		if (i915_gem_active_peek(&tl->last_request,
					 &engine->i915->drm.struct_mutex))
			return false;
	}

	return (!engine->last_retired_context ||
		i915_gem_context_is_kernel(engine->last_retired_context));
}
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	struct i915_gem_timeline *timeline;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	i915_gem_retire_requests(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		struct drm_i915_gem_request *req;
		int ret;

		if (engine_has_kernel_context(engine))
			continue;

		req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
		if (IS_ERR(req))
			return PTR_ERR(req);

		/* Queue this switch after all other activity */
		list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
			struct drm_i915_gem_request *prev;
			struct intel_timeline *tl;

			tl = &timeline->engine[engine->id];
			prev = i915_gem_active_raw(&tl->last_request,
						   &dev_priv->drm.struct_mutex);
			if (prev)
				i915_sw_fence_await_sw_fence_gfp(&req->submit,
								 &prev->submit,
								 GFP_KERNEL);
		}

		ret = i915_switch_context(req);
		i915_add_request(req);
		if (ret)
			return ret;
	}

	return 0;
}
static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_context_create *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (!dev_priv->engine[RCS]->context_size)
		return -ENODEV;

	if (args->pad != 0)
		return -EINVAL;

	if (client_is_banned(file_priv)) {
		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
			  current->comm,
			  pid_nr(get_task_pid(current, PIDTYPE_PID)));

		return -EIO;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	ctx = i915_gem_create_context(dev_priv, file_priv);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

	args->ctx_id = ctx->user_handle;
	DRM_DEBUG("HW context %d created\n", args->ctx_id);

	return 0;
}
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;
	int ret;

	if (args->pad != 0)
		return -EINVAL;

	if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
		return -ENOENT;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		goto out;

	__destroy_hw_context(ctx, file_priv);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_context_put(ctx);
	return ret;
}
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	args->size = 0;
	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
		break;
	case I915_CONTEXT_PARAM_GTT_SIZE:
		if (ctx->ppgtt)
			args->value = ctx->ppgtt->base.total;
		else if (to_i915(dev)->mm.aliasing_ppgtt)
			args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
		else
			args->value = to_i915(dev)->ggtt.base.total;
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->value = i915_gem_context_no_error_capture(ctx);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		args->value = i915_gem_context_is_bannable(ctx);
		break;
	case I915_CONTEXT_PARAM_PRIORITY:
		args->value = ctx->priority;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}
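/*
 * Illustrative userspace sketch for the getparam ioctl above (assumes an open
 * i915 DRM fd and libdrm's drmIoctl(); error handling elided). Querying the
 * total GTT size visible to the default context:
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = 0, // DEFAULT_CONTEXT_HANDLE
 *		.param = I915_CONTEXT_PARAM_GTT_SIZE,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *	// p.value now holds the address space size in bytes
 */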
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	switch (args->param) {
	case I915_CONTEXT_PARAM_BAN_PERIOD:
		ret = -EINVAL;
		break;
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		if (args->size) {
			ret = -EINVAL;
		} else {
			ctx->flags &= ~CONTEXT_NO_ZEROMAP;
			ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
		}
		break;
	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		if (args->size)
			ret = -EINVAL;
		else if (args->value)
			i915_gem_context_set_no_error_capture(ctx);
		else
			i915_gem_context_clear_no_error_capture(ctx);
		break;
	case I915_CONTEXT_PARAM_BANNABLE:
		if (args->size)
			ret = -EINVAL;
		else if (!capable(CAP_SYS_ADMIN) && !args->value)
			ret = -EPERM;
		else if (args->value)
			i915_gem_context_set_bannable(ctx);
		else
			i915_gem_context_clear_bannable(ctx);
		break;
	case I915_CONTEXT_PARAM_PRIORITY:
		{
			int priority = args->value;

			if (args->size)
				ret = -EINVAL;
			else if (!to_i915(dev)->engine[RCS]->schedule)
				ret = -ENODEV;
			else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
				 priority < I915_CONTEXT_MIN_USER_PRIORITY)
				ret = -EINVAL;
			else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
				 !capable(CAP_SYS_NICE))
				ret = -EPERM;
			else
				ctx->priority = priority;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_context_put(ctx);
	return ret;
}
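/*
 * Illustrative userspace counterpart for the setparam ioctl (assumptions as
 * in the getparam sketch above): raising a context's scheduling priority,
 * which needs CAP_SYS_NICE for values above the default of 0:
 *
 *	struct drm_i915_gem_context_param p = {
 *		.ctx_id = ctx_id, // handle from CONTEXT_CREATE
 *		.param = I915_CONTEXT_PARAM_PRIORITY,
 *		.value = 512, // within [-1023, 1023]
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */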
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&dev_priv->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
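/*
 * Illustrative userspace sketch for the reset-stats query above (assumes an
 * open i915 DRM fd; error handling elided). flags and pad must be zero:
 *
 *	struct drm_i915_reset_stats rs = { .ctx_id = ctx_id };
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &rs);
 *	// rs.batch_active:  hangs in which this context was guilty
 *	// rs.batch_pending: hangs in which this context was an innocent victim
 */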
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif