/*
 * Copyright © 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current context, in order to invoke a save of the context we actually care
 * about. In fact, the code could likely be constructed, albeit in a more
 * complicated fashion, to never use the default context, though that limits
 * the driver's ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits execbuf with context
 * S2->S3: another client submits execbuf with the same context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 *  The "current context" means the context which is currently running on the
 *  GPU. The GPU has loaded its state already and has stored away the gtt
 *  offset of the BO. The GPU is not actively referencing the data at this
 *  offset, but it will on the next context switch. The only way to avoid this
 *  is to do a GPU reset.
 *
 *  An "active context" is one which was previously the "current context" and is
 *  on the active list waiting for the next context switch to occur. Until this
 *  happens, the object must remain at the same gtt offset. It is therefore
 *  possible to destroy a context while it is still active.
 */
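
/*
 * Purely as an illustration of the lifecycle above (this snippet is not part
 * of the driver): a userspace client drives the common transitions through
 * the uapi ioctls from include/uapi/drm/i915_drm.h; error handling omitted.
 *
 *    struct drm_i915_gem_context_create create = {};
 *    ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);    // S0->S1
 *
 *    struct drm_i915_gem_execbuffer2 execbuf = { ... };
 *    execbuf.rsvd1 = create.ctx_id;
 *    ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);      // S1->S2
 *
 *    struct drm_i915_gem_context_destroy destroy = {
 *            .ctx_id = create.ctx_id,
 *    };
 *    ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);  // S2->S4
 */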

#include <linux/log2.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_workarounds.h"

#define ALL_L3_SLICES(dev) ((1 << NUM_L3_SLICES(dev)) - 1)

static void lut_close(struct i915_gem_context *ctx)
{
        struct i915_lut_handle *lut, *ln;
        struct radix_tree_iter iter;
        void __rcu **slot;

        list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
                list_del(&lut->obj_link);
                kmem_cache_free(ctx->i915->luts, lut);
        }

        rcu_read_lock();
        radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
                struct i915_vma *vma = rcu_dereference_raw(*slot);

                radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
                __i915_gem_object_release_unless_active(vma->obj);
        }
        rcu_read_unlock();
}

static void i915_gem_context_free(struct i915_gem_context *ctx)
{
        unsigned int n;

        lockdep_assert_held(&ctx->i915->drm.struct_mutex);
        GEM_BUG_ON(!i915_gem_context_is_closed(ctx));

        i915_ppgtt_put(ctx->ppgtt);

        for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
                struct intel_context *ce = &ctx->__engine[n];

                if (!ce->state)
                        continue;

                WARN_ON(ce->pin_count);
                if (ce->ring)
                        intel_ring_free(ce->ring);

                __i915_gem_object_release_unless_active(ce->state->obj);
        }

        kfree(ctx->name);
        put_pid(ctx->pid);

        list_del(&ctx->link);

        ida_simple_remove(&ctx->i915->contexts.hw_ida, ctx->hw_id);
        kfree_rcu(ctx, rcu);
}

static void contexts_free(struct drm_i915_private *i915)
{
        struct llist_node *freed = llist_del_all(&i915->contexts.free_list);
        struct i915_gem_context *ctx, *cn;

        lockdep_assert_held(&i915->drm.struct_mutex);

        llist_for_each_entry_safe(ctx, cn, freed, free_link)
                i915_gem_context_free(ctx);
}

static void contexts_free_first(struct drm_i915_private *i915)
{
        struct i915_gem_context *ctx;
        struct llist_node *freed;

        lockdep_assert_held(&i915->drm.struct_mutex);

        freed = llist_del_first(&i915->contexts.free_list);
        if (!freed)
                return;

        ctx = container_of(freed, typeof(*ctx), free_link);
        i915_gem_context_free(ctx);
}

static void contexts_free_worker(struct work_struct *work)
{
        struct drm_i915_private *i915 =
                container_of(work, typeof(*i915), contexts.free_work);

        mutex_lock(&i915->drm.struct_mutex);
        contexts_free(i915);
        mutex_unlock(&i915->drm.struct_mutex);
}

void i915_gem_context_release(struct kref *ref)
{
        struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
        struct drm_i915_private *i915 = ctx->i915;

        trace_i915_context_free(ctx);
        if (llist_add(&ctx->free_link, &i915->contexts.free_list))
                queue_work(i915->wq, &i915->contexts.free_work);
}

static void context_close(struct i915_gem_context *ctx)
{
        i915_gem_context_set_closed(ctx);

        /*
         * The LUT uses the VMA as a backpointer to unref the object,
         * so we need to clear the LUT before we close all the VMA (inside
         * the ppgtt).
         */
        lut_close(ctx);
        if (ctx->ppgtt)
                i915_ppgtt_close(&ctx->ppgtt->base);

        ctx->file_priv = ERR_PTR(-EBADF);
        i915_gem_context_put(ctx);
}

static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
{
        int ret;
        unsigned int max;

        if (INTEL_GEN(dev_priv) >= 11)
                max = GEN11_MAX_CONTEXT_HW_ID;
        else
                max = MAX_CONTEXT_HW_ID;

        ret = ida_simple_get(&dev_priv->contexts.hw_ida,
                             0, max, GFP_KERNEL);
        if (ret < 0) {
                /* Contexts are only released when no longer active.
                 * Flush any pending retires to hopefully release some
                 * stale contexts and try again.
                 */
                i915_retire_requests(dev_priv);
                ret = ida_simple_get(&dev_priv->contexts.hw_ida,
                                     0, max, GFP_KERNEL);
                if (ret < 0)
                        return ret;
        }

        *out = ret;
        return 0;
}

static u32 default_desc_template(const struct drm_i915_private *i915,
                                 const struct i915_hw_ppgtt *ppgtt)
{
        u32 address_mode;
        u32 desc;

        desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;

        address_mode = INTEL_LEGACY_32B_CONTEXT;
        if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
                address_mode = INTEL_LEGACY_64B_CONTEXT;
        desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;

        if (IS_GEN8(i915))
                desc |= GEN8_CTX_L3LLC_COHERENT;

        /* TODO: WaDisableLiteRestore when we start using semaphore
         * signalling between Command Streamers
         * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
         */

        return desc;
}
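
/*
 * For reference, a worked example of the template (this follows directly
 * from the code above and adds no new definitions): for a context with a
 * 48b full ppgtt on gen8, i.e. i915_vm_is_48bit() true, the template
 * works out to
 *
 *    desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE |
 *           (INTEL_LEGACY_64B_CONTEXT << GEN8_CTX_ADDRESSING_MODE_SHIFT) |
 *           GEN8_CTX_L3LLC_COHERENT;
 */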

static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
                    struct drm_i915_file_private *file_priv)
{
        struct i915_gem_context *ctx;
        int ret;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (ctx == NULL)
                return ERR_PTR(-ENOMEM);

        ret = assign_hw_id(dev_priv, &ctx->hw_id);
        if (ret) {
                kfree(ctx);
                return ERR_PTR(ret);
        }

        kref_init(&ctx->ref);
        list_add_tail(&ctx->link, &dev_priv->contexts.list);
        ctx->i915 = dev_priv;
        ctx->sched.priority = I915_PRIORITY_NORMAL;

        INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
        INIT_LIST_HEAD(&ctx->handles_list);

        /* Default context will never have a file_priv */
        ret = DEFAULT_CONTEXT_HANDLE;
        if (file_priv) {
                ret = idr_alloc(&file_priv->context_idr, ctx,
                                DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
                if (ret < 0)
                        goto err_lut;
        }
        ctx->user_handle = ret;

        ctx->file_priv = file_priv;
        if (file_priv) {
                ctx->pid = get_task_pid(current, PIDTYPE_PID);
                ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
                                      current->comm, pid_nr(ctx->pid),
                                      ctx->user_handle);
                if (!ctx->name) {
                        ret = -ENOMEM;
                        goto err_pid;
                }
        }

        /* NB: Mark all slices as needing a remap so that when the context first
         * loads it will restore whatever remap state already exists. If there
         * is no remap info, it will be a NOP. */
        ctx->remap_slice = ALL_L3_SLICES(dev_priv);

        i915_gem_context_set_bannable(ctx);
        ctx->ring_size = 4 * PAGE_SIZE;
        ctx->desc_template =
                default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);

        /*
         * GuC requires the ring to be placed in Non-WOPCM memory. If GuC is not
         * present or not in use we still need a small bias as ring wraparound
         * at offset 0 sometimes hangs. No idea why.
         */
        if (USES_GUC(dev_priv))
                ctx->ggtt_offset_bias = dev_priv->guc.ggtt_pin_bias;
        else
                ctx->ggtt_offset_bias = I915_GTT_PAGE_SIZE;

        return ctx;

err_pid:
        put_pid(ctx->pid);
        idr_remove(&file_priv->context_idr, ctx->user_handle);
err_lut:
        context_close(ctx);
        return ERR_PTR(ret);
}

static void __destroy_hw_context(struct i915_gem_context *ctx,
                                 struct drm_i915_file_private *file_priv)
{
        idr_remove(&file_priv->context_idr, ctx->user_handle);
        context_close(ctx);
}

static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv,
                        struct drm_i915_file_private *file_priv)
{
        struct i915_gem_context *ctx;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        /* Reap the most stale context */
        contexts_free_first(dev_priv);

        ctx = __create_hw_context(dev_priv, file_priv);
        if (IS_ERR(ctx))
                return ctx;

        if (USES_FULL_PPGTT(dev_priv)) {
                struct i915_hw_ppgtt *ppgtt;

                ppgtt = i915_ppgtt_create(dev_priv, file_priv, ctx->name);
                if (IS_ERR(ppgtt)) {
                        DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
                                         PTR_ERR(ppgtt));
                        __destroy_hw_context(ctx, file_priv);
                        return ERR_CAST(ppgtt);
                }

                ctx->ppgtt = ppgtt;
                ctx->desc_template = default_desc_template(dev_priv, ppgtt);
        }

        trace_i915_context_create(ctx);

        return ctx;
}

/**
 * i915_gem_context_create_gvt - create a GVT GEM context
 * @dev: drm device *
 *
 * This function is used to create a GVT specific GEM context.
 *
 * Returns:
 * pointer to i915_gem_context on success, error pointer if failed
 *
 */
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev)
{
        struct i915_gem_context *ctx;
        int ret;

        if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
                return ERR_PTR(-ENODEV);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ERR_PTR(ret);

        ctx = __create_hw_context(to_i915(dev), NULL);
        if (IS_ERR(ctx))
                goto out;

        ctx->file_priv = ERR_PTR(-EBADF);
        i915_gem_context_set_closed(ctx); /* not user accessible */
        i915_gem_context_clear_bannable(ctx);
        i915_gem_context_set_force_single_submission(ctx);
        if (!USES_GUC_SUBMISSION(to_i915(dev)))
                ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */

        GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
        mutex_unlock(&dev->struct_mutex);
        return ctx;
}

struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
{
        struct i915_gem_context *ctx;

        ctx = i915_gem_create_context(i915, NULL);
        if (IS_ERR(ctx))
                return ctx;

        i915_gem_context_clear_bannable(ctx);
        ctx->sched.priority = prio;
        ctx->ring_size = PAGE_SIZE;

        GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

        return ctx;
}

static void
destroy_kernel_context(struct i915_gem_context **ctxp)
{
        struct i915_gem_context *ctx;

        /* Keep the context ref so that we can free it immediately ourselves */
        ctx = i915_gem_context_get(fetch_and_zero(ctxp));
        GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));

        context_close(ctx);
        i915_gem_context_free(ctx);
}

static bool needs_preempt_context(struct drm_i915_private *i915)
{
        return HAS_LOGICAL_RING_PREEMPTION(i915);
}

int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
{
        struct i915_gem_context *ctx;
        int ret;

        /* Reassure ourselves we are only called once */
        GEM_BUG_ON(dev_priv->kernel_context);
        GEM_BUG_ON(dev_priv->preempt_context);

        ret = intel_ctx_workarounds_init(dev_priv);
        if (ret)
                return ret;

        INIT_LIST_HEAD(&dev_priv->contexts.list);
        INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker);
        init_llist_head(&dev_priv->contexts.free_list);

        /* Using the simple ida interface, the max is limited by sizeof(int) */
        BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
        BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX);
        ida_init(&dev_priv->contexts.hw_ida);

        /* lowest priority; idle task */
        ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN);
        if (IS_ERR(ctx)) {
                DRM_ERROR("Failed to create default global context\n");
                return PTR_ERR(ctx);
        }
        /*
         * For easy recognisability, we want the kernel context to be 0 and
         * then all user contexts will have non-zero hw_id.
         */
        GEM_BUG_ON(ctx->hw_id);
        dev_priv->kernel_context = ctx;

        /* highest priority; preempting task */
        if (needs_preempt_context(dev_priv)) {
                ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
                if (!IS_ERR(ctx))
                        dev_priv->preempt_context = ctx;
                else
                        DRM_ERROR("Failed to create preempt context; disabling preemption\n");
        }

        DRM_DEBUG_DRIVER("%s context support initialized\n",
                         dev_priv->engine[RCS]->context_size ? "logical" :
                         "fake");
        return 0;
}

void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        for_each_engine(engine, dev_priv, id) {
                engine->legacy_active_context = NULL;
                engine->legacy_active_ppgtt = NULL;

                if (!engine->last_retired_context)
                        continue;

                intel_context_unpin(engine->last_retired_context, engine);
                engine->last_retired_context = NULL;
        }
}

void i915_gem_contexts_fini(struct drm_i915_private *i915)
{
        lockdep_assert_held(&i915->drm.struct_mutex);

        if (i915->preempt_context)
                destroy_kernel_context(&i915->preempt_context);
        destroy_kernel_context(&i915->kernel_context);

        /* Must free all deferred contexts (via flush_workqueue) first */
        ida_destroy(&i915->contexts.hw_ida);
}

static int context_idr_cleanup(int id, void *p, void *data)
{
        struct i915_gem_context *ctx = p;

        context_close(ctx);
        return 0;
}

int i915_gem_context_open(struct drm_i915_private *i915,
                          struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct i915_gem_context *ctx;

        idr_init(&file_priv->context_idr);

        mutex_lock(&i915->drm.struct_mutex);
        ctx = i915_gem_create_context(i915, file_priv);
        mutex_unlock(&i915->drm.struct_mutex);
        if (IS_ERR(ctx)) {
                idr_destroy(&file_priv->context_idr);
                return PTR_ERR(ctx);
        }

        GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

        return 0;
}

void i915_gem_context_close(struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;

        lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);

        idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
        idr_destroy(&file_priv->context_idr);
}

static struct i915_request *
last_request_on_engine(struct i915_timeline *timeline,
                       struct intel_engine_cs *engine)
{
        struct i915_request *rq;

        if (timeline == &engine->timeline)
                return NULL;

        rq = i915_gem_active_raw(&timeline->last_request,
                                 &engine->i915->drm.struct_mutex);
        if (rq && rq->engine == engine)
                return rq;

        return NULL;
}

static bool engine_has_idle_kernel_context(struct intel_engine_cs *engine)
{
        struct i915_timeline *timeline;

        list_for_each_entry(timeline, &engine->i915->gt.timelines, link) {
                if (last_request_on_engine(timeline, engine))
                        return false;
        }

        return intel_engine_has_kernel_context(engine);
}

int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;
        struct i915_timeline *timeline;
        enum intel_engine_id id;

        lockdep_assert_held(&dev_priv->drm.struct_mutex);

        i915_retire_requests(dev_priv);

        for_each_engine(engine, dev_priv, id) {
                struct i915_request *rq;

                if (engine_has_idle_kernel_context(engine))
                        continue;

                rq = i915_request_alloc(engine, dev_priv->kernel_context);
                if (IS_ERR(rq))
                        return PTR_ERR(rq);

                /* Queue this switch after all other activity */
                list_for_each_entry(timeline, &dev_priv->gt.timelines, link) {
                        struct i915_request *prev;

                        prev = last_request_on_engine(timeline, engine);
                        if (prev)
                                i915_sw_fence_await_sw_fence_gfp(&rq->submit,
                                                                 &prev->submit,
                                                                 I915_FENCE_GFP);
                }

                /*
                 * Force a flush after the switch to ensure that all rendering
                 * and operations prior to switching to the kernel context hit
                 * memory. This should be guaranteed by the previous request,
                 * but an extra layer of paranoia before we declare the system
                 * idle (on suspend etc) is advisable!
                 */
                __i915_request_add(rq, true);
        }

        return 0;
}
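
/*
 * A sketch of typical usage of the helper above on the idle/suspend path
 * (illustrative only; the real callers live elsewhere, e.g. i915_gem.c, and
 * add further waits and error handling):
 *
 *    ret = i915_gem_switch_to_kernel_context(i915);
 *    if (ret)
 *            return ret;
 *    ret = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
 */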

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
        return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_context_create *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct i915_gem_context *ctx;
        int ret;

        if (!dev_priv->engine[RCS]->context_size)
                return -ENODEV;

        if (args->pad != 0)
                return -EINVAL;

        if (client_is_banned(file_priv)) {
                DRM_DEBUG("client %s[%d] banned from creating ctx\n",
                          current->comm,
                          pid_nr(get_task_pid(current, PIDTYPE_PID)));

                return -EIO;
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        ctx = i915_gem_create_context(dev_priv, file_priv);
        mutex_unlock(&dev->struct_mutex);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        GEM_BUG_ON(i915_gem_context_is_kernel(ctx));

        args->ctx_id = ctx->user_handle;
        DRM_DEBUG("HW context %d created\n", args->ctx_id);

        return 0;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file)
{
        struct drm_i915_gem_context_destroy *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct i915_gem_context *ctx;
        int ret;

        if (args->pad != 0)
                return -EINVAL;

        if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
                return -ENOENT;

        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
        if (!ctx)
                return -ENOENT;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                goto out;

        __destroy_hw_context(ctx, file_priv);
        mutex_unlock(&dev->struct_mutex);

out:
        i915_gem_context_put(ctx);
        return ret;
}

int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_context_param *args = data;
        struct i915_gem_context *ctx;
        int ret = 0;

        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
        if (!ctx)
                return -ENOENT;

        args->size = 0;
        switch (args->param) {
        case I915_CONTEXT_PARAM_BAN_PERIOD:
                ret = -EINVAL;
                break;
        case I915_CONTEXT_PARAM_NO_ZEROMAP:
                args->value = ctx->flags & CONTEXT_NO_ZEROMAP;
                break;
        case I915_CONTEXT_PARAM_GTT_SIZE:
                if (ctx->ppgtt)
                        args->value = ctx->ppgtt->base.total;
                else if (to_i915(dev)->mm.aliasing_ppgtt)
                        args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
                else
                        args->value = to_i915(dev)->ggtt.base.total;
                break;
        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
                args->value = i915_gem_context_no_error_capture(ctx);
                break;
        case I915_CONTEXT_PARAM_BANNABLE:
                args->value = i915_gem_context_is_bannable(ctx);
                break;
        case I915_CONTEXT_PARAM_PRIORITY:
                args->value = ctx->sched.priority;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        i915_gem_context_put(ctx);
        return ret;
}
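
/*
 * Illustrative userspace counterpart (a sketch, not part of the driver):
 * query the total GTT size visible to a context via the getparam ioctl
 * handled above.
 *
 *    struct drm_i915_gem_context_param p = {
 *            .ctx_id = ctx_id,
 *            .param = I915_CONTEXT_PARAM_GTT_SIZE,
 *    };
 *    if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0)
 *            gtt_size = p.value;       // total GTT size in bytes
 */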

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_context_param *args = data;
        struct i915_gem_context *ctx;
        int ret;

        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
        if (!ctx)
                return -ENOENT;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto out;

        switch (args->param) {
        case I915_CONTEXT_PARAM_BAN_PERIOD:
                ret = -EINVAL;
                break;
        case I915_CONTEXT_PARAM_NO_ZEROMAP:
                if (args->size) {
                        ret = -EINVAL;
                } else {
                        ctx->flags &= ~CONTEXT_NO_ZEROMAP;
                        ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
                }
                break;
        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
                if (args->size)
                        ret = -EINVAL;
                else if (args->value)
                        i915_gem_context_set_no_error_capture(ctx);
                else
                        i915_gem_context_clear_no_error_capture(ctx);
                break;
        case I915_CONTEXT_PARAM_BANNABLE:
                if (args->size)
                        ret = -EINVAL;
                else if (!capable(CAP_SYS_ADMIN) && !args->value)
                        ret = -EPERM;
                else if (args->value)
                        i915_gem_context_set_bannable(ctx);
                else
                        i915_gem_context_clear_bannable(ctx);
                break;
        case I915_CONTEXT_PARAM_PRIORITY:
                {
                        s64 priority = args->value;

                        if (args->size)
                                ret = -EINVAL;
                        else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
                                ret = -ENODEV;
                        else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
                                 priority < I915_CONTEXT_MIN_USER_PRIORITY)
                                ret = -EINVAL;
                        else if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
                                 !capable(CAP_SYS_NICE))
                                ret = -EPERM;
                        else
                                ctx->sched.priority = priority;
                }
                break;

        default:
                ret = -EINVAL;
                break;
        }

        mutex_unlock(&dev->struct_mutex);

out:
        i915_gem_context_put(ctx);
        return ret;
}
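
/*
 * Illustrative userspace counterpart for the setparam path (a sketch): note
 * that raising the priority above I915_CONTEXT_DEFAULT_PRIORITY requires
 * CAP_SYS_NICE, as enforced in the PRIORITY case above.
 *
 *    struct drm_i915_gem_context_param p = {
 *            .ctx_id = ctx_id,
 *            .param = I915_CONTEXT_PARAM_PRIORITY,
 *            .value = I915_CONTEXT_MAX_USER_PRIORITY,
 *    };
 *    ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */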

int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
                                       void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_reset_stats *args = data;
        struct i915_gem_context *ctx;
        int ret;

        if (args->flags || args->pad)
                return -EINVAL;

        ret = -ENOENT;
        rcu_read_lock();
        ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
        if (!ctx)
                goto out;

        /*
         * We opt for unserialised reads here. This may result in tearing
         * in the extremely unlikely event of a GPU hang on this context
         * as we are querying them. If we need that extra layer of protection,
         * we should wrap the hangstats with a seqlock.
         */

        if (capable(CAP_SYS_ADMIN))
                args->reset_count = i915_reset_count(&dev_priv->gpu_error);
        else
                args->reset_count = 0;

        args->batch_active = atomic_read(&ctx->guilty_count);
        args->batch_pending = atomic_read(&ctx->active_count);

        ret = 0;
out:
        rcu_read_unlock();
        return ret;
}
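
/*
 * Illustrative userspace usage of the reset stats ioctl above (a sketch):
 *
 *    struct drm_i915_reset_stats stats = { .ctx_id = ctx_id };
 *    if (ioctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats) == 0)
 *            printf("hangs caused: %u, pending at hang: %u\n",
 *                   stats.batch_active, stats.batch_pending);
 */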

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif /* CONFIG_DRM_I915_SELFTEST */