drm/i915: Drop the CONTEXT_CLONE API (v2)
drivers/gpu/drm/i915/gem/i915_gem_context.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2011-2012 Intel Corporation
5  */
6
7 /*
8  * This file implements HW context support. On gen5+ a HW context consists of an
9  * opaque GPU object which is referenced at times of context saves and restores.
10  * With RC6 enabled, the context is also referenced as the GPU enters and exits
11  * from RC6 (the GPU has its own internal power context, except on gen5). Though
12  * something like a context does exist for the media ring, the code only
13  * supports contexts for the render ring.
14  *
15  * In software, there is a distinction between contexts created by the user,
16  * and the default HW context. The default HW context is used by GPU clients
17  * that do not request setup of their own hardware context. The default
18  * context's state is never restored to help prevent programming errors. This
19  * would happen if a client ran and piggy-backed off another client's GPU state.
20  * The default context only exists to give the GPU some offset to load as the
21  * current to invoke a save of the context we actually care about. In fact, the
22  * code could likely be constructed, albeit in a more complicated fashion, to
23  * never use the default context, though that limits the driver's ability to
24  * swap out, and/or destroy other contexts.
25  *
26  * All other contexts are created as a request by the GPU client. These contexts
27  * store GPU state, and thus allow GPU clients to not re-emit state (and
28  * potentially query certain state) at any time. The kernel driver makes
29  * certain that the appropriate commands are inserted.
30  *
31  * The context life cycle is semi-complicated in that context BOs may live
32  * longer than the context itself because of the way the hardware and object
33  * tracking work. Below is a very crude representation of the state machine
34  * describing the context life.
35  *                                         refcount     pincount     active
36  * S0: initial state                          0            0           0
37  * S1: context created                        1            0           0
38  * S2: context is currently running           2            1           X
39  * S3: GPU referenced, but not current        2            0           1
40  * S4: context is current, but destroyed      1            1           0
41  * S5: like S3, but destroyed                 1            0           1
42  *
43  * The most common (but not all) transitions:
44  * S0->S1: client creates a context
45  * S1->S2: client submits execbuf with context
46  * S2->S3: another client submits execbuf with context
47  * S3->S1: context object was retired
48  * S3->S2: client submits another execbuf
49  * S2->S4: context destroy called with current context
50  * S3->S5->S0: destroy path
51  * S4->S5->S0: destroy path on current context
52  *
53  * There are two confusing terms used above:
54  *  The "current context" means the context which is currently running on the
55  *  GPU. The GPU has loaded its state already and has stored away the gtt
56  *  offset of the BO. The GPU is not actively referencing the data at this
57  *  offset, but it will on the next context switch. The only way to avoid this
58  *  is to do a GPU reset.
59  *
60  *  An "active context" is one which was previously the "current context" and is
61  *  on the active list waiting for the next context switch to occur. Until this
62  *  happens, the object must remain at the same gtt offset. It is therefore
63  * possible to destroy a context while it is still active.
64  *
65  */
66
67 #include <linux/log2.h>
68 #include <linux/nospec.h>
69
70 #include "gt/gen6_ppgtt.h"
71 #include "gt/intel_context.h"
72 #include "gt/intel_context_param.h"
73 #include "gt/intel_engine_heartbeat.h"
74 #include "gt/intel_engine_user.h"
75 #include "gt/intel_execlists_submission.h" /* virtual_engine */
76 #include "gt/intel_gpu_commands.h"
77 #include "gt/intel_ring.h"
78
79 #include "i915_gem_context.h"
80 #include "i915_globals.h"
81 #include "i915_trace.h"
82 #include "i915_user_extensions.h"
83
84 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
85
86 static struct i915_global_gem_context {
87         struct i915_global base;
88         struct kmem_cache *slab_luts;
89 } global;
90
91 struct i915_lut_handle *i915_lut_handle_alloc(void)
92 {
93         return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
94 }
95
96 void i915_lut_handle_free(struct i915_lut_handle *lut)
97 {
98         return kmem_cache_free(global.slab_luts, lut);
99 }
100
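/*
 * lut_close() tears down the ctx->handles_vma radix tree: every handle->vma
 * lookup entry (struct i915_lut_handle) belonging to this context is unlinked
 * from its object's lut_list and freed, the vma is closed and the object
 * reference held by the lookup entry is dropped. This runs on context close
 * and when the context's ppgtt is replaced, as the cached vma belong to the
 * old address space.
 */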
101 static void lut_close(struct i915_gem_context *ctx)
102 {
103         struct radix_tree_iter iter;
104         void __rcu **slot;
105
106         mutex_lock(&ctx->lut_mutex);
107         rcu_read_lock();
108         radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
109                 struct i915_vma *vma = rcu_dereference_raw(*slot);
110                 struct drm_i915_gem_object *obj = vma->obj;
111                 struct i915_lut_handle *lut;
112
113                 if (!kref_get_unless_zero(&obj->base.refcount))
114                         continue;
115
116                 spin_lock(&obj->lut_lock);
117                 list_for_each_entry(lut, &obj->lut_list, obj_link) {
118                         if (lut->ctx != ctx)
119                                 continue;
120
121                         if (lut->handle != iter.index)
122                                 continue;
123
124                         list_del(&lut->obj_link);
125                         break;
126                 }
127                 spin_unlock(&obj->lut_lock);
128
129                 if (&lut->obj_link != &obj->lut_list) {
130                         i915_lut_handle_free(lut);
131                         radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
132                         i915_vma_close(vma);
133                         i915_gem_object_put(obj);
134                 }
135
136                 i915_gem_object_put(obj);
137         }
138         rcu_read_unlock();
139         mutex_unlock(&ctx->lut_mutex);
140 }
141
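/*
 * Resolve a user-supplied engine descriptor to the intel_context stored in
 * ctx->engines[]. With LOOKUP_USER_INDEX, ci->engine_instance is treated as
 * an index into the user-defined engine map (as configured through
 * I915_CONTEXT_PARAM_ENGINES); otherwise the { class, instance } pair is
 * matched against the legacy engine layout. The flag must agree with whether
 * the context actually carries a user engine map.
 */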
142 static struct intel_context *
143 lookup_user_engine(struct i915_gem_context *ctx,
144                    unsigned long flags,
145                    const struct i915_engine_class_instance *ci)
146 #define LOOKUP_USER_INDEX BIT(0)
147 {
148         int idx;
149
150         if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
151                 return ERR_PTR(-EINVAL);
152
153         if (!i915_gem_context_user_engines(ctx)) {
154                 struct intel_engine_cs *engine;
155
156                 engine = intel_engine_lookup_user(ctx->i915,
157                                                   ci->engine_class,
158                                                   ci->engine_instance);
159                 if (!engine)
160                         return ERR_PTR(-EINVAL);
161
162                 idx = engine->legacy_idx;
163         } else {
164                 idx = ci->engine_instance;
165         }
166
167         return i915_gem_context_get_engine(ctx, idx);
168 }
169
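/*
 * context_get_vm_rcu() returns a strong reference to the context's address
 * space without requiring ctx->mutex; the caller only needs to be inside an
 * RCU read-side section. Callers follow roughly this pattern (cf. get_ppgtt()):
 *
 *	rcu_read_lock();
 *	vm = context_get_vm_rcu(ctx);
 *	rcu_read_unlock();
 *
 *	... use vm ...
 *
 *	i915_vm_put(vm);
 */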
170 static struct i915_address_space *
171 context_get_vm_rcu(struct i915_gem_context *ctx)
172 {
173         GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
174
175         do {
176                 struct i915_address_space *vm;
177
178                 /*
179                  * We do not allow downgrading from full-ppgtt [to a shared
180                  * global gtt], so ctx->vm cannot become NULL.
181                  */
182                 vm = rcu_dereference(ctx->vm);
183                 if (!kref_get_unless_zero(&vm->ref))
184                         continue;
185
186                 /*
187                  * This ppgtt may have been reallocated between
188                  * the read and the kref, and reassigned to a third
189                  * context. In order to avoid inadvertently sharing
190                  * this ppgtt with that third context, we have to
191                  * confirm that we still have the same
192                  * ppgtt after passing through the strong memory
193                  * barrier implied by a successful
194                  * kref_get_unless_zero().
195                  *
196                  * Once we have acquired the current ppgtt of ctx,
197                  * we no longer care if it is released from ctx, as
198                  * it cannot be reallocated elsewhere.
199                  */
200
201                 if (vm == rcu_access_pointer(ctx->vm))
202                         return rcu_pointer_handoff(vm);
203
204                 i915_vm_put(vm);
205         } while (1);
206 }
207
208 static void intel_context_set_gem(struct intel_context *ce,
209                                   struct i915_gem_context *ctx)
210 {
211         GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
212         RCU_INIT_POINTER(ce->gem_context, ctx);
213
214         ce->ring_size = SZ_16K;
215
216         if (rcu_access_pointer(ctx->vm)) {
217                 struct i915_address_space *vm;
218
219                 rcu_read_lock();
220                 vm = context_get_vm_rcu(ctx); /* hmm */
221                 rcu_read_unlock();
222
223                 i915_vm_put(ce->vm);
224                 ce->vm = vm;
225         }
226
227         GEM_BUG_ON(ce->timeline);
228         if (ctx->timeline)
229                 ce->timeline = intel_timeline_get(ctx->timeline);
230
231         if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
232             intel_engine_has_timeslices(ce->engine))
233                 __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
234
235         if (IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) &&
236             ctx->i915->params.request_timeout_ms) {
237                 unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
238
239                 intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
240         }
241 }
242
243 static void __free_engines(struct i915_gem_engines *e, unsigned int count)
244 {
245         while (count--) {
246                 if (!e->engines[count])
247                         continue;
248
249                 intel_context_put(e->engines[count]);
250         }
251         kfree(e);
252 }
253
254 static void free_engines(struct i915_gem_engines *e)
255 {
256         __free_engines(e, e->num_engines);
257 }
258
259 static void free_engines_rcu(struct rcu_head *rcu)
260 {
261         struct i915_gem_engines *engines =
262                 container_of(rcu, struct i915_gem_engines, rcu);
263
264         i915_sw_fence_fini(&engines->fence);
265         free_engines(engines);
266 }
267
268 static int __i915_sw_fence_call
269 engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
270 {
271         struct i915_gem_engines *engines =
272                 container_of(fence, typeof(*engines), fence);
273
274         switch (state) {
275         case FENCE_COMPLETE:
276                 if (!list_empty(&engines->link)) {
277                         struct i915_gem_context *ctx = engines->ctx;
278                         unsigned long flags;
279
280                         spin_lock_irqsave(&ctx->stale.lock, flags);
281                         list_del(&engines->link);
282                         spin_unlock_irqrestore(&ctx->stale.lock, flags);
283                 }
284                 i915_gem_context_put(engines->ctx);
285                 break;
286
287         case FENCE_FREE:
288                 init_rcu_head(&engines->rcu);
289                 call_rcu(&engines->rcu, free_engines_rcu);
290                 break;
291         }
292
293         return NOTIFY_DONE;
294 }
295
296 static struct i915_gem_engines *alloc_engines(unsigned int count)
297 {
298         struct i915_gem_engines *e;
299
300         e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
301         if (!e)
302                 return NULL;
303
304         i915_sw_fence_init(&e->fence, engines_notify);
305         return e;
306 }
307
308 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
309 {
310         const struct intel_gt *gt = &ctx->i915->gt;
311         struct intel_engine_cs *engine;
312         struct i915_gem_engines *e;
313         enum intel_engine_id id;
314
315         e = alloc_engines(I915_NUM_ENGINES);
316         if (!e)
317                 return ERR_PTR(-ENOMEM);
318
319         for_each_engine(engine, gt, id) {
320                 struct intel_context *ce;
321
322                 if (engine->legacy_idx == INVALID_ENGINE)
323                         continue;
324
325                 GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
326                 GEM_BUG_ON(e->engines[engine->legacy_idx]);
327
328                 ce = intel_context_create(engine);
329                 if (IS_ERR(ce)) {
330                         __free_engines(e, e->num_engines + 1);
331                         return ERR_CAST(ce);
332                 }
333
334                 intel_context_set_gem(ce, ctx);
335
336                 e->engines[engine->legacy_idx] = ce;
337                 e->num_engines = max(e->num_engines, engine->legacy_idx);
338         }
339         e->num_engines++;
340
341         return e;
342 }
343
344 void i915_gem_context_release(struct kref *ref)
345 {
346         struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
347
348         trace_i915_context_free(ctx);
349         GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
350
351         mutex_destroy(&ctx->engines_mutex);
352         mutex_destroy(&ctx->lut_mutex);
353
354         if (ctx->timeline)
355                 intel_timeline_put(ctx->timeline);
356
357         put_pid(ctx->pid);
358         mutex_destroy(&ctx->mutex);
359
360         kfree_rcu(ctx, rcu);
361 }
362
363 static inline struct i915_gem_engines *
364 __context_engines_static(const struct i915_gem_context *ctx)
365 {
366         return rcu_dereference_protected(ctx->engines, true);
367 }
368
369 static void __reset_context(struct i915_gem_context *ctx,
370                             struct intel_engine_cs *engine)
371 {
372         intel_gt_handle_error(engine->gt, engine->mask, 0,
373                               "context closure in %s", ctx->name);
374 }
375
376 static bool __cancel_engine(struct intel_engine_cs *engine)
377 {
378         /*
379          * Send a "high priority pulse" down the engine to cause the
380          * current request to be momentarily preempted. (If it fails to
381          * be preempted, it will be reset). As we have marked our context
382          * as banned, any incomplete request, including any running, will
383          * be skipped following the preemption.
384          *
385          * If there is no hangchecking (one of the reasons why we try to
386          * cancel the context) and no forced preemption, there may be no
387          * means by which we reset the GPU and evict the persistent hog.
388          * Ergo if we are unable to inject a preemptive pulse that can
389          * kill the banned context, we fall back to doing a local reset
390          * instead.
391          */
392         return intel_engine_pulse(engine) == 0;
393 }
394
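/*
 * active_engine() returns the physical engine on which @ce is currently
 * executing, or NULL if the context is idle or has not yet been submitted.
 * The answer is only a snapshot; kill_engines() uses it to decide whether a
 * context still needs to be evicted from the GPU.
 */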
395 static struct intel_engine_cs *active_engine(struct intel_context *ce)
396 {
397         struct intel_engine_cs *engine = NULL;
398         struct i915_request *rq;
399
400         if (intel_context_has_inflight(ce))
401                 return intel_context_inflight(ce);
402
403         if (!ce->timeline)
404                 return NULL;
405
406         /*
407          * rq->link is only SLAB_TYPESAFE_BY_RCU, so we need to hold a reference
408          * to the request to prevent it being transferred to a new timeline
409          * (and onto a new timeline->requests list).
410          */
411         rcu_read_lock();
412         list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
413                 bool found;
414
415                 /* timeline is already completed up to this point? */
416                 if (!i915_request_get_rcu(rq))
417                         break;
418
419                 /* Check with the backend if the request is inflight */
420                 found = true;
421                 if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
422                         found = i915_request_active_engine(rq, &engine);
423
424                 i915_request_put(rq);
425                 if (found)
426                         break;
427         }
428         rcu_read_unlock();
429
430         return engine;
431 }
432
433 static void kill_engines(struct i915_gem_engines *engines, bool ban)
434 {
435         struct i915_gem_engines_iter it;
436         struct intel_context *ce;
437
438         /*
439          * Map the user's engine back to the actual engines; one virtual
440          * engine will be mapped to multiple engines, and using ctx->engine[]
441          * the same engine may be have multiple instances in the user's map.
442          * However, we only care about pending requests, so only include
443          * engines on which there are incomplete requests.
444          */
445         for_each_gem_engine(ce, engines, it) {
446                 struct intel_engine_cs *engine;
447
448                 if (ban && intel_context_set_banned(ce))
449                         continue;
450
451                 /*
452                  * Check the current active state of this context; if we
453                  * are currently executing on the GPU we need to evict
454                  * ourselves. On the other hand, if we haven't yet been
455                  * submitted to the GPU or if everything is complete,
456                  * we have nothing to do.
457                  */
458                 engine = active_engine(ce);
459
460                 /* First attempt to gracefully cancel the context */
461                 if (engine && !__cancel_engine(engine) && ban)
462                         /*
463                          * If we are unable to send a preemptive pulse to bump
464                          * the context from the GPU, we have to resort to a full
465                          * reset. We hope the collateral damage is worth it.
466                          */
467                         __reset_context(engines->ctx, engine);
468         }
469 }
470
471 static void kill_context(struct i915_gem_context *ctx)
472 {
473         bool ban = (!i915_gem_context_is_persistent(ctx) ||
474                     !ctx->i915->params.enable_hangcheck);
475         struct i915_gem_engines *pos, *next;
476
477         spin_lock_irq(&ctx->stale.lock);
478         GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
479         list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
480                 if (!i915_sw_fence_await(&pos->fence)) {
481                         list_del_init(&pos->link);
482                         continue;
483                 }
484
485                 spin_unlock_irq(&ctx->stale.lock);
486
487                 kill_engines(pos, ban);
488
489                 spin_lock_irq(&ctx->stale.lock);
490                 GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
491                 list_safe_reset_next(pos, next, link);
492                 list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
493
494                 i915_sw_fence_complete(&pos->fence);
495         }
496         spin_unlock_irq(&ctx->stale.lock);
497 }
498
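/*
 * engines_idle_release() defers the release of an engines array until all of
 * its contexts have been retired. For each context that is still pinned, the
 * array's fence awaits the context's i915_active; once everything has been
 * scheduled out, engines_notify() drops the ctx reference and the array is
 * freed via RCU (free_engines_rcu). If the context was already closed, or an
 * await failed, the engines are killed immediately instead.
 */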
499 static void engines_idle_release(struct i915_gem_context *ctx,
500                                  struct i915_gem_engines *engines)
501 {
502         struct i915_gem_engines_iter it;
503         struct intel_context *ce;
504
505         INIT_LIST_HEAD(&engines->link);
506
507         engines->ctx = i915_gem_context_get(ctx);
508
509         for_each_gem_engine(ce, engines, it) {
510                 int err;
511
512                 /* serialises with execbuf */
513                 set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
514                 if (!intel_context_pin_if_active(ce))
515                         continue;
516
517                 /* Wait until context is finally scheduled out and retired */
518                 err = i915_sw_fence_await_active(&engines->fence,
519                                                  &ce->active,
520                                                  I915_ACTIVE_AWAIT_BARRIER);
521                 intel_context_unpin(ce);
522                 if (err)
523                         goto kill;
524         }
525
526         spin_lock_irq(&ctx->stale.lock);
527         if (!i915_gem_context_is_closed(ctx))
528                 list_add_tail(&engines->link, &ctx->stale.engines);
529         spin_unlock_irq(&ctx->stale.lock);
530
531 kill:
532         if (list_empty(&engines->link)) /* raced, already closed */
533                 kill_engines(engines, true);
534
535         i915_sw_fence_commit(&engines->fence);
536 }
537
538 static void set_closed_name(struct i915_gem_context *ctx)
539 {
540         char *s;
541
542         /* Replace '[]' with '<>' to indicate closed in debug prints */
543
544         s = strrchr(ctx->name, '[');
545         if (!s)
546                 return;
547
548         *s = '<';
549
550         s = strchr(s + 1, ']');
551         if (s)
552                 *s = '>';
553 }
554
555 static void context_close(struct i915_gem_context *ctx)
556 {
557         struct i915_address_space *vm;
558
559         /* Flush any concurrent set_engines() */
560         mutex_lock(&ctx->engines_mutex);
561         engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
562         i915_gem_context_set_closed(ctx);
563         mutex_unlock(&ctx->engines_mutex);
564
565         mutex_lock(&ctx->mutex);
566
567         set_closed_name(ctx);
568
569         vm = i915_gem_context_vm(ctx);
570         if (vm)
571                 i915_vm_close(vm);
572
573         ctx->file_priv = ERR_PTR(-EBADF);
574
575         /*
576          * The LUT uses the VMA as a backpointer to unref the object,
577          * so we need to clear the LUT before we close all the VMA (inside
578          * the ppgtt).
579          */
580         lut_close(ctx);
581
582         spin_lock(&ctx->i915->gem.contexts.lock);
583         list_del(&ctx->link);
584         spin_unlock(&ctx->i915->gem.contexts.lock);
585
586         mutex_unlock(&ctx->mutex);
587
588         /*
589          * If the user has disabled hangchecking, we cannot be sure that
590          * the batches will ever complete after the context is closed,
591          * keeping the context and all resources pinned forever. So in this
592          * case we opt to forcibly kill off all remaining requests on
593          * context close.
594          */
595         kill_context(ctx);
596
597         i915_gem_context_put(ctx);
598 }
599
600 static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
601 {
602         if (i915_gem_context_is_persistent(ctx) == state)
603                 return 0;
604
605         if (state) {
606                 /*
607                  * Only contexts that are short-lived [that will expire or be
608                  * reset] are allowed to survive past termination. We require
609                  * hangcheck to ensure that the persistent requests are healthy.
610                  */
611                 if (!ctx->i915->params.enable_hangcheck)
612                         return -EINVAL;
613
614                 i915_gem_context_set_persistence(ctx);
615         } else {
616                 /* To cancel a context we use "preempt-to-idle" */
617                 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
618                         return -ENODEV;
619
620                 /*
621                  * If the cancel fails, we then need to reset, cleanly!
622                  *
623                  * If the per-engine reset fails, all hope is lost! We resort
624                  * to a full GPU reset in that unlikely case, but realistically
625                  * if the engine could not reset, the full reset does not fare
626                  * much better. The damage has been done.
627                  *
628                  * However, if we cannot reset an engine by itself, we cannot
629                  * cleanup a hanging persistent context without causing
630                  * collateral damage, and we should not pretend we can by
631                  * exposing the interface.
632                  */
633                 if (!intel_has_reset_engine(&ctx->i915->gt))
634                         return -ENODEV;
635
636                 i915_gem_context_clear_persistence(ctx);
637         }
638
639         return 0;
640 }
641
642 static struct i915_gem_context *
643 __create_context(struct drm_i915_private *i915)
644 {
645         struct i915_gem_context *ctx;
646         struct i915_gem_engines *e;
647         int err;
648         int i;
649
650         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
651         if (!ctx)
652                 return ERR_PTR(-ENOMEM);
653
654         kref_init(&ctx->ref);
655         ctx->i915 = i915;
656         ctx->sched.priority = I915_PRIORITY_NORMAL;
657         mutex_init(&ctx->mutex);
658         INIT_LIST_HEAD(&ctx->link);
659
660         spin_lock_init(&ctx->stale.lock);
661         INIT_LIST_HEAD(&ctx->stale.engines);
662
663         mutex_init(&ctx->engines_mutex);
664         e = default_engines(ctx);
665         if (IS_ERR(e)) {
666                 err = PTR_ERR(e);
667                 goto err_free;
668         }
669         RCU_INIT_POINTER(ctx->engines, e);
670
671         INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
672         mutex_init(&ctx->lut_mutex);
673
674         /* NB: Mark all slices as needing a remap so that when the context first
675          * loads it will restore whatever remap state already exists. If there
676          * is no remap info, it will be a NOP. */
677         ctx->remap_slice = ALL_L3_SLICES(i915);
678
679         i915_gem_context_set_bannable(ctx);
680         i915_gem_context_set_recoverable(ctx);
681         __context_set_persistence(ctx, true /* cgroup hook? */);
682
683         for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
684                 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
685
686         return ctx;
687
688 err_free:
689         kfree(ctx);
690         return ERR_PTR(err);
691 }
692
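/*
 * __context_engines_await() pins the current ctx->engines array against
 * concurrent replacement by taking an await on its fence. The caller must
 * drop that reference with i915_sw_fence_complete() when done, e.g.:
 *
 *	e = __context_engines_await(ctx, NULL);
 *	for_each_gem_engine(ce, e, it)
 *		...;
 *	i915_sw_fence_complete(&e->fence);
 */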
693 static inline struct i915_gem_engines *
694 __context_engines_await(const struct i915_gem_context *ctx,
695                         bool *user_engines)
696 {
697         struct i915_gem_engines *engines;
698
699         rcu_read_lock();
700         do {
701                 engines = rcu_dereference(ctx->engines);
702                 GEM_BUG_ON(!engines);
703
704                 if (user_engines)
705                         *user_engines = i915_gem_context_user_engines(ctx);
706
707                 /* successful await => strong mb */
708                 if (unlikely(!i915_sw_fence_await(&engines->fence)))
709                         continue;
710
711                 if (likely(engines == rcu_access_pointer(ctx->engines)))
712                         break;
713
714                 i915_sw_fence_complete(&engines->fence);
715         } while (1);
716         rcu_read_unlock();
717
718         return engines;
719 }
720
721 static void
722 context_apply_all(struct i915_gem_context *ctx,
723                   void (*fn)(struct intel_context *ce, void *data),
724                   void *data)
725 {
726         struct i915_gem_engines_iter it;
727         struct i915_gem_engines *e;
728         struct intel_context *ce;
729
730         e = __context_engines_await(ctx, NULL);
731         for_each_gem_engine(ce, e, it)
732                 fn(ce, data);
733         i915_sw_fence_complete(&e->fence);
734 }
735
736 static void __apply_ppgtt(struct intel_context *ce, void *vm)
737 {
738         i915_vm_put(ce->vm);
739         ce->vm = i915_vm_get(vm);
740 }
741
742 static struct i915_address_space *
743 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
744 {
745         struct i915_address_space *old;
746
747         old = rcu_replace_pointer(ctx->vm,
748                                   i915_vm_open(vm),
749                                   lockdep_is_held(&ctx->mutex));
750         GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
751
752         context_apply_all(ctx, __apply_ppgtt, vm);
753
754         return old;
755 }
756
757 static void __assign_ppgtt(struct i915_gem_context *ctx,
758                            struct i915_address_space *vm)
759 {
760         if (vm == rcu_access_pointer(ctx->vm))
761                 return;
762
763         vm = __set_ppgtt(ctx, vm);
764         if (vm)
765                 i915_vm_close(vm);
766 }
767
768 static void __set_timeline(struct intel_timeline **dst,
769                            struct intel_timeline *src)
770 {
771         struct intel_timeline *old = *dst;
772
773         *dst = src ? intel_timeline_get(src) : NULL;
774
775         if (old)
776                 intel_timeline_put(old);
777 }
778
779 static void __apply_timeline(struct intel_context *ce, void *timeline)
780 {
781         __set_timeline(&ce->timeline, timeline);
782 }
783
784 static void __assign_timeline(struct i915_gem_context *ctx,
785                               struct intel_timeline *timeline)
786 {
787         __set_timeline(&ctx->timeline, timeline);
788         context_apply_all(ctx, __apply_timeline, timeline);
789 }
790
791 static struct i915_gem_context *
792 i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
793 {
794         struct i915_gem_context *ctx;
795
796         if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
797             !HAS_EXECLISTS(i915))
798                 return ERR_PTR(-EINVAL);
799
800         ctx = __create_context(i915);
801         if (IS_ERR(ctx))
802                 return ctx;
803
804         if (HAS_FULL_PPGTT(i915)) {
805                 struct i915_ppgtt *ppgtt;
806
807                 ppgtt = i915_ppgtt_create(&i915->gt);
808                 if (IS_ERR(ppgtt)) {
809                         drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
810                                 PTR_ERR(ppgtt));
811                         context_close(ctx);
812                         return ERR_CAST(ppgtt);
813                 }
814
815                 mutex_lock(&ctx->mutex);
816                 __assign_ppgtt(ctx, &ppgtt->vm);
817                 mutex_unlock(&ctx->mutex);
818
819                 i915_vm_put(&ppgtt->vm);
820         }
821
822         if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
823                 struct intel_timeline *timeline;
824
825                 timeline = intel_timeline_create(&i915->gt);
826                 if (IS_ERR(timeline)) {
827                         context_close(ctx);
828                         return ERR_CAST(timeline);
829                 }
830
831                 __assign_timeline(ctx, timeline);
832                 intel_timeline_put(timeline);
833         }
834
835         trace_i915_context_create(ctx);
836
837         return ctx;
838 }
839
840 static void init_contexts(struct i915_gem_contexts *gc)
841 {
842         spin_lock_init(&gc->lock);
843         INIT_LIST_HEAD(&gc->list);
844 }
845
846 void i915_gem_init__contexts(struct drm_i915_private *i915)
847 {
848         init_contexts(&i915->gem.contexts);
849 }
850
851 static int gem_context_register(struct i915_gem_context *ctx,
852                                 struct drm_i915_file_private *fpriv,
853                                 u32 *id)
854 {
855         struct drm_i915_private *i915 = ctx->i915;
856         struct i915_address_space *vm;
857         int ret;
858
859         ctx->file_priv = fpriv;
860
861         mutex_lock(&ctx->mutex);
862         vm = i915_gem_context_vm(ctx);
863         if (vm)
864                 WRITE_ONCE(vm->file, fpriv); /* XXX */
865         mutex_unlock(&ctx->mutex);
866
867         ctx->pid = get_task_pid(current, PIDTYPE_PID);
868         snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
869                  current->comm, pid_nr(ctx->pid));
870
871         /* And finally expose ourselves to userspace via the context xarray */
872         ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
873         if (ret)
874                 goto err_pid;
875
876         spin_lock(&i915->gem.contexts.lock);
877         list_add_tail(&ctx->link, &i915->gem.contexts.list);
878         spin_unlock(&i915->gem.contexts.lock);
879
880         return 0;
881
882 err_pid:
883         put_pid(fetch_and_zero(&ctx->pid));
884         return ret;
885 }
886
887 int i915_gem_context_open(struct drm_i915_private *i915,
888                           struct drm_file *file)
889 {
890         struct drm_i915_file_private *file_priv = file->driver_priv;
891         struct i915_gem_context *ctx;
892         int err;
893         u32 id;
894
895         xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
896
897         /* 0 reserved for invalid/unassigned ppgtt */
898         xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
899
900         ctx = i915_gem_create_context(i915, 0);
901         if (IS_ERR(ctx)) {
902                 err = PTR_ERR(ctx);
903                 goto err;
904         }
905
906         err = gem_context_register(ctx, file_priv, &id);
907         if (err < 0)
908                 goto err_ctx;
909
910         GEM_BUG_ON(id);
911         return 0;
912
913 err_ctx:
914         context_close(ctx);
915 err:
916         xa_destroy(&file_priv->vm_xa);
917         xa_destroy(&file_priv->context_xa);
918         return err;
919 }
920
921 void i915_gem_context_close(struct drm_file *file)
922 {
923         struct drm_i915_file_private *file_priv = file->driver_priv;
924         struct i915_address_space *vm;
925         struct i915_gem_context *ctx;
926         unsigned long idx;
927
928         xa_for_each(&file_priv->context_xa, idx, ctx)
929                 context_close(ctx);
930         xa_destroy(&file_priv->context_xa);
931
932         xa_for_each(&file_priv->vm_xa, idx, vm)
933                 i915_vm_put(vm);
934         xa_destroy(&file_priv->vm_xa);
935 }
936
937 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
938                              struct drm_file *file)
939 {
940         struct drm_i915_private *i915 = to_i915(dev);
941         struct drm_i915_gem_vm_control *args = data;
942         struct drm_i915_file_private *file_priv = file->driver_priv;
943         struct i915_ppgtt *ppgtt;
944         u32 id;
945         int err;
946
947         if (!HAS_FULL_PPGTT(i915))
948                 return -ENODEV;
949
950         if (args->flags)
951                 return -EINVAL;
952
953         ppgtt = i915_ppgtt_create(&i915->gt);
954         if (IS_ERR(ppgtt))
955                 return PTR_ERR(ppgtt);
956
957         ppgtt->vm.file = file_priv;
958
959         if (args->extensions) {
960                 err = i915_user_extensions(u64_to_user_ptr(args->extensions),
961                                            NULL, 0,
962                                            ppgtt);
963                 if (err)
964                         goto err_put;
965         }
966
967         err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
968                        xa_limit_32b, GFP_KERNEL);
969         if (err)
970                 goto err_put;
971
972         GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
973         args->vm_id = id;
974         return 0;
975
976 err_put:
977         i915_vm_put(&ppgtt->vm);
978         return err;
979 }
980
981 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
982                               struct drm_file *file)
983 {
984         struct drm_i915_file_private *file_priv = file->driver_priv;
985         struct drm_i915_gem_vm_control *args = data;
986         struct i915_address_space *vm;
987
988         if (args->flags)
989                 return -EINVAL;
990
991         if (args->extensions)
992                 return -EINVAL;
993
994         vm = xa_erase(&file_priv->vm_xa, args->vm_id);
995         if (!vm)
996                 return -ENOENT;
997
998         i915_vm_put(vm);
999         return 0;
1000 }
1001
1002 struct context_barrier_task {
1003         struct i915_active base;
1004         void (*task)(void *data);
1005         void *data;
1006 };
1007
1008 static void cb_retire(struct i915_active *base)
1009 {
1010         struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
1011
1012         if (cb->task)
1013                 cb->task(cb->data);
1014
1015         i915_active_fini(&cb->base);
1016         kfree(cb);
1017 }
1018
1019 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
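/*
 * context_barrier_task() queues a request on each engine of the context
 * (subject to the @engines mask and the skip() filter, with optional commands
 * supplied by emit()) and arranges for task(data) to be invoked only once all
 * of those requests have retired. set_ppgtt() relies on this to keep the old
 * ppgtt alive until no in-flight request can still be using it.
 */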
1020 static int context_barrier_task(struct i915_gem_context *ctx,
1021                                 intel_engine_mask_t engines,
1022                                 bool (*skip)(struct intel_context *ce, void *data),
1023                                 int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data),
1024                                 int (*emit)(struct i915_request *rq, void *data),
1025                                 void (*task)(void *data),
1026                                 void *data)
1027 {
1028         struct context_barrier_task *cb;
1029         struct i915_gem_engines_iter it;
1030         struct i915_gem_engines *e;
1031         struct i915_gem_ww_ctx ww;
1032         struct intel_context *ce;
1033         int err = 0;
1034
1035         GEM_BUG_ON(!task);
1036
1037         cb = kmalloc(sizeof(*cb), GFP_KERNEL);
1038         if (!cb)
1039                 return -ENOMEM;
1040
1041         i915_active_init(&cb->base, NULL, cb_retire, 0);
1042         err = i915_active_acquire(&cb->base);
1043         if (err) {
1044                 kfree(cb);
1045                 return err;
1046         }
1047
1048         e = __context_engines_await(ctx, NULL);
1049         if (!e) {
1050                 i915_active_release(&cb->base);
1051                 return -ENOENT;
1052         }
1053
1054         for_each_gem_engine(ce, e, it) {
1055                 struct i915_request *rq;
1056
1057                 if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
1058                                        ce->engine->mask)) {
1059                         err = -ENXIO;
1060                         break;
1061                 }
1062
1063                 if (!(ce->engine->mask & engines))
1064                         continue;
1065
1066                 if (skip && skip(ce, data))
1067                         continue;
1068
1069                 i915_gem_ww_ctx_init(&ww, true);
1070 retry:
1071                 err = intel_context_pin_ww(ce, &ww);
1072                 if (err)
1073                         goto err;
1074
1075                 if (pin)
1076                         err = pin(ce, &ww, data);
1077                 if (err)
1078                         goto err_unpin;
1079
1080                 rq = i915_request_create(ce);
1081                 if (IS_ERR(rq)) {
1082                         err = PTR_ERR(rq);
1083                         goto err_unpin;
1084                 }
1085
1086                 err = 0;
1087                 if (emit)
1088                         err = emit(rq, data);
1089                 if (err == 0)
1090                         err = i915_active_add_request(&cb->base, rq);
1091
1092                 i915_request_add(rq);
1093 err_unpin:
1094                 intel_context_unpin(ce);
1095 err:
1096                 if (err == -EDEADLK) {
1097                         err = i915_gem_ww_ctx_backoff(&ww);
1098                         if (!err)
1099                                 goto retry;
1100                 }
1101                 i915_gem_ww_ctx_fini(&ww);
1102
1103                 if (err)
1104                         break;
1105         }
1106         i915_sw_fence_complete(&e->fence);
1107
1108         cb->task = err ? NULL : task; /* caller needs to unwind instead */
1109         cb->data = data;
1110
1111         i915_active_release(&cb->base);
1112
1113         return err;
1114 }
1115
1116 static int get_ppgtt(struct drm_i915_file_private *file_priv,
1117                      struct i915_gem_context *ctx,
1118                      struct drm_i915_gem_context_param *args)
1119 {
1120         struct i915_address_space *vm;
1121         int err;
1122         u32 id;
1123
1124         if (!rcu_access_pointer(ctx->vm))
1125                 return -ENODEV;
1126
1127         rcu_read_lock();
1128         vm = context_get_vm_rcu(ctx);
1129         rcu_read_unlock();
1130         if (!vm)
1131                 return -ENODEV;
1132
1133         err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
1134         if (err)
1135                 goto err_put;
1136
1137         i915_vm_open(vm);
1138
1139         GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
1140         args->value = id;
1141         args->size = 0;
1142
1143 err_put:
1144         i915_vm_put(vm);
1145         return err;
1146 }
1147
1148 static void set_ppgtt_barrier(void *data)
1149 {
1150         struct i915_address_space *old = data;
1151
1152         if (GRAPHICS_VER(old->i915) < 8)
1153                 gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
1154
1155         i915_vm_close(old);
1156 }
1157
1158 static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data)
1159 {
1160         struct i915_address_space *vm = ce->vm;
1161
1162         if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915))
1163                 /* ppGTT is not part of the legacy context image */
1164                 return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);
1165
1166         return 0;
1167 }
1168
1169 static int emit_ppgtt_update(struct i915_request *rq, void *data)
1170 {
1171         struct i915_address_space *vm = rq->context->vm;
1172         struct intel_engine_cs *engine = rq->engine;
1173         u32 base = engine->mmio_base;
1174         u32 *cs;
1175         int i;
1176
1177         if (i915_vm_is_4lvl(vm)) {
1178                 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1179                 const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
1180
1181                 cs = intel_ring_begin(rq, 6);
1182                 if (IS_ERR(cs))
1183                         return PTR_ERR(cs);
1184
1185                 *cs++ = MI_LOAD_REGISTER_IMM(2);
1186
1187                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
1188                 *cs++ = upper_32_bits(pd_daddr);
1189                 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
1190                 *cs++ = lower_32_bits(pd_daddr);
1191
1192                 *cs++ = MI_NOOP;
1193                 intel_ring_advance(rq, cs);
1194         } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
1195                 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1196                 int err;
1197
1198                 /* Magic required to prevent forcewake errors! */
1199                 err = engine->emit_flush(rq, EMIT_INVALIDATE);
1200                 if (err)
1201                         return err;
1202
1203                 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
1204                 if (IS_ERR(cs))
1205                         return PTR_ERR(cs);
1206
1207                 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
1208                 for (i = GEN8_3LVL_PDPES; i--; ) {
1209                         const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1210
1211                         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
1212                         *cs++ = upper_32_bits(pd_daddr);
1213                         *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
1214                         *cs++ = lower_32_bits(pd_daddr);
1215                 }
1216                 *cs++ = MI_NOOP;
1217                 intel_ring_advance(rq, cs);
1218         }
1219
1220         return 0;
1221 }
1222
1223 static bool skip_ppgtt_update(struct intel_context *ce, void *data)
1224 {
1225         if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
1226                 return !ce->state;
1227         else
1228                 return !atomic_read(&ce->pin_count);
1229 }
1230
1231 static int set_ppgtt(struct drm_i915_file_private *file_priv,
1232                      struct i915_gem_context *ctx,
1233                      struct drm_i915_gem_context_param *args)
1234 {
1235         struct i915_address_space *vm, *old;
1236         int err;
1237
1238         if (args->size)
1239                 return -EINVAL;
1240
1241         if (!rcu_access_pointer(ctx->vm))
1242                 return -ENODEV;
1243
1244         if (upper_32_bits(args->value))
1245                 return -ENOENT;
1246
1247         rcu_read_lock();
1248         vm = xa_load(&file_priv->vm_xa, args->value);
1249         if (vm && !kref_get_unless_zero(&vm->ref))
1250                 vm = NULL;
1251         rcu_read_unlock();
1252         if (!vm)
1253                 return -ENOENT;
1254
1255         err = mutex_lock_interruptible(&ctx->mutex);
1256         if (err)
1257                 goto out;
1258
1259         if (i915_gem_context_is_closed(ctx)) {
1260                 err = -ENOENT;
1261                 goto unlock;
1262         }
1263
1264         if (vm == rcu_access_pointer(ctx->vm))
1265                 goto unlock;
1266
1267         old = __set_ppgtt(ctx, vm);
1268
1269         /* Tear down the existing obj:vma cache, it will have to be rebuilt. */
1270         lut_close(ctx);
1271
1272         /*
1273          * We need to flush any requests using the current ppgtt before
1274          * we release it as the requests do not hold a reference themselves,
1275          * only indirectly through the context.
1276          */
1277         err = context_barrier_task(ctx, ALL_ENGINES,
1278                                    skip_ppgtt_update,
1279                                    pin_ppgtt_update,
1280                                    emit_ppgtt_update,
1281                                    set_ppgtt_barrier,
1282                                    old);
1283         if (err) {
1284                 i915_vm_close(__set_ppgtt(ctx, old));
1285                 i915_vm_close(old);
1286                 lut_close(ctx); /* force a rebuild of the old obj:vma cache */
1287         }
1288
1289 unlock:
1290         mutex_unlock(&ctx->mutex);
1291 out:
1292         i915_vm_put(vm);
1293         return err;
1294 }
1295
1296 int
1297 i915_gem_user_to_context_sseu(struct intel_gt *gt,
1298                               const struct drm_i915_gem_context_param_sseu *user,
1299                               struct intel_sseu *context)
1300 {
1301         const struct sseu_dev_info *device = &gt->info.sseu;
1302         struct drm_i915_private *i915 = gt->i915;
1303
1304         /* No zeros in any field. */
1305         if (!user->slice_mask || !user->subslice_mask ||
1306             !user->min_eus_per_subslice || !user->max_eus_per_subslice)
1307                 return -EINVAL;
1308
1309         /* Max >= min. */
1310         if (user->max_eus_per_subslice < user->min_eus_per_subslice)
1311                 return -EINVAL;
1312
1313         /*
1314          * Some future proofing on the types since the uAPI is wider than the
1315          * current internal implementation.
1316          */
1317         if (overflows_type(user->slice_mask, context->slice_mask) ||
1318             overflows_type(user->subslice_mask, context->subslice_mask) ||
1319             overflows_type(user->min_eus_per_subslice,
1320                            context->min_eus_per_subslice) ||
1321             overflows_type(user->max_eus_per_subslice,
1322                            context->max_eus_per_subslice))
1323                 return -EINVAL;
1324
1325         /* Check validity against hardware. */
1326         if (user->slice_mask & ~device->slice_mask)
1327                 return -EINVAL;
1328
1329         if (user->subslice_mask & ~device->subslice_mask[0])
1330                 return -EINVAL;
1331
1332         if (user->max_eus_per_subslice > device->max_eus_per_subslice)
1333                 return -EINVAL;
1334
1335         context->slice_mask = user->slice_mask;
1336         context->subslice_mask = user->subslice_mask;
1337         context->min_eus_per_subslice = user->min_eus_per_subslice;
1338         context->max_eus_per_subslice = user->max_eus_per_subslice;
1339
1340         /* Part specific restrictions. */
1341         if (GRAPHICS_VER(i915) == 11) {
1342                 unsigned int hw_s = hweight8(device->slice_mask);
1343                 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]);
1344                 unsigned int req_s = hweight8(context->slice_mask);
1345                 unsigned int req_ss = hweight8(context->subslice_mask);
1346
1347                 /*
1348                  * Only full subslice enablement is possible if more than one
1349                  * slice is turned on.
1350                  */
1351                 if (req_s > 1 && req_ss != hw_ss_per_s)
1352                         return -EINVAL;
1353
1354                 /*
1355                  * If more than four (SScount bitfield limit) subslices are
1356                  * requested then the number has to be even.
1357                  */
1358                 if (req_ss > 4 && (req_ss & 1))
1359                         return -EINVAL;
1360
1361                 /*
1362                  * If only one slice is enabled and subslice count is below the
1363                  * device full enablement, it must be at most half of all the
1364                  * available subslices.
1365                  */
1366                 if (req_s == 1 && req_ss < hw_ss_per_s &&
1367                     req_ss > (hw_ss_per_s / 2))
1368                         return -EINVAL;
1369
1370                 /* ABI restriction - VME use case only. */
1371
1372                 /* All slices or one slice only. */
1373                 if (req_s != 1 && req_s != hw_s)
1374                         return -EINVAL;
1375
1376                 /*
1377                  * Half subslices or full enablement only when one slice is
1378                  * enabled.
1379                  */
1380                 if (req_s == 1 &&
1381                     (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2)))
1382                         return -EINVAL;
1383
1384                 /* No EU configuration changes. */
1385                 if ((user->min_eus_per_subslice !=
1386                      device->max_eus_per_subslice) ||
1387                     (user->max_eus_per_subslice !=
1388                      device->max_eus_per_subslice))
1389                         return -EINVAL;
1390         }
1391
1392         return 0;
1393 }
1394
1395 static int set_sseu(struct i915_gem_context *ctx,
1396                     struct drm_i915_gem_context_param *args)
1397 {
1398         struct drm_i915_private *i915 = ctx->i915;
1399         struct drm_i915_gem_context_param_sseu user_sseu;
1400         struct intel_context *ce;
1401         struct intel_sseu sseu;
1402         unsigned long lookup;
1403         int ret;
1404
1405         if (args->size < sizeof(user_sseu))
1406                 return -EINVAL;
1407
1408         if (GRAPHICS_VER(i915) != 11)
1409                 return -ENODEV;
1410
1411         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
1412                            sizeof(user_sseu)))
1413                 return -EFAULT;
1414
1415         if (user_sseu.rsvd)
1416                 return -EINVAL;
1417
1418         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
1419                 return -EINVAL;
1420
1421         lookup = 0;
1422         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
1423                 lookup |= LOOKUP_USER_INDEX;
1424
1425         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
1426         if (IS_ERR(ce))
1427                 return PTR_ERR(ce);
1428
1429         /* Only render engine supports RPCS configuration. */
1430         if (ce->engine->class != RENDER_CLASS) {
1431                 ret = -ENODEV;
1432                 goto out_ce;
1433         }
1434
1435         ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
1436         if (ret)
1437                 goto out_ce;
1438
1439         ret = intel_context_reconfigure_sseu(ce, sseu);
1440         if (ret)
1441                 goto out_ce;
1442
1443         args->size = sizeof(user_sseu);
1444
1445 out_ce:
1446         intel_context_put(ce);
1447         return ret;
1448 }
1449
1450 struct set_engines {
1451         struct i915_gem_context *ctx;
1452         struct i915_gem_engines *engines;
1453 };
1454
1455 static int
1456 set_engines__load_balance(struct i915_user_extension __user *base, void *data)
1457 {
1458         struct i915_context_engines_load_balance __user *ext =
1459                 container_of_user(base, typeof(*ext), base);
1460         const struct set_engines *set = data;
1461         struct drm_i915_private *i915 = set->ctx->i915;
1462         struct intel_engine_cs *stack[16];
1463         struct intel_engine_cs **siblings;
1464         struct intel_context *ce;
1465         u16 num_siblings, idx;
1466         unsigned int n;
1467         int err;
1468
1469         if (!HAS_EXECLISTS(i915))
1470                 return -ENODEV;
1471
1472         if (intel_uc_uses_guc_submission(&i915->gt.uc))
1473                 return -ENODEV; /* not implemented yet */
1474
1475         if (get_user(idx, &ext->engine_index))
1476                 return -EFAULT;
1477
1478         if (idx >= set->engines->num_engines) {
1479                 drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
1480                         idx, set->engines->num_engines);
1481                 return -EINVAL;
1482         }
1483
1484         idx = array_index_nospec(idx, set->engines->num_engines);
1485         if (set->engines->engines[idx]) {
1486                 drm_dbg(&i915->drm,
1487                         "Invalid placement[%d], already occupied\n", idx);
1488                 return -EEXIST;
1489         }
1490
1491         if (get_user(num_siblings, &ext->num_siblings))
1492                 return -EFAULT;
1493
1494         err = check_user_mbz(&ext->flags);
1495         if (err)
1496                 return err;
1497
1498         err = check_user_mbz(&ext->mbz64);
1499         if (err)
1500                 return err;
1501
1502         siblings = stack;
1503         if (num_siblings > ARRAY_SIZE(stack)) {
1504                 siblings = kmalloc_array(num_siblings,
1505                                          sizeof(*siblings),
1506                                          GFP_KERNEL);
1507                 if (!siblings)
1508                         return -ENOMEM;
1509         }
1510
1511         for (n = 0; n < num_siblings; n++) {
1512                 struct i915_engine_class_instance ci;
1513
1514                 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
1515                         err = -EFAULT;
1516                         goto out_siblings;
1517                 }
1518
1519                 siblings[n] = intel_engine_lookup_user(i915,
1520                                                        ci.engine_class,
1521                                                        ci.engine_instance);
1522                 if (!siblings[n]) {
1523                         drm_dbg(&i915->drm,
1524                                 "Invalid sibling[%d]: { class:%d, inst:%d }\n",
1525                                 n, ci.engine_class, ci.engine_instance);
1526                         err = -EINVAL;
1527                         goto out_siblings;
1528                 }
1529         }
1530
1531         ce = intel_execlists_create_virtual(siblings, n);
1532         if (IS_ERR(ce)) {
1533                 err = PTR_ERR(ce);
1534                 goto out_siblings;
1535         }
1536
1537         intel_context_set_gem(ce, set->ctx);
1538
1539         if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
1540                 intel_context_put(ce);
1541                 err = -EEXIST;
1542                 goto out_siblings;
1543         }
1544
1545 out_siblings:
1546         if (siblings != stack)
1547                 kfree(siblings);
1548
1549         return err;
1550 }
1551
1552 static int
1553 set_engines__bond(struct i915_user_extension __user *base, void *data)
1554 {
1555         struct i915_context_engines_bond __user *ext =
1556                 container_of_user(base, typeof(*ext), base);
1557         const struct set_engines *set = data;
1558         struct drm_i915_private *i915 = set->ctx->i915;
1559         struct i915_engine_class_instance ci;
1560         struct intel_engine_cs *virtual;
1561         struct intel_engine_cs *master;
1562         u16 idx, num_bonds;
1563         int err, n;
1564
1565         if (get_user(idx, &ext->virtual_index))
1566                 return -EFAULT;
1567
1568         if (idx >= set->engines->num_engines) {
1569                 drm_dbg(&i915->drm,
1570                         "Invalid index for virtual engine: %d >= %d\n",
1571                         idx, set->engines->num_engines);
1572                 return -EINVAL;
1573         }
1574
1575         idx = array_index_nospec(idx, set->engines->num_engines);
1576         if (!set->engines->engines[idx]) {
1577                 drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
1578                 return -EINVAL;
1579         }
1580         virtual = set->engines->engines[idx]->engine;
1581
1582         err = check_user_mbz(&ext->flags);
1583         if (err)
1584                 return err;
1585
1586         for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
1587                 err = check_user_mbz(&ext->mbz64[n]);
1588                 if (err)
1589                         return err;
1590         }
1591
1592         if (copy_from_user(&ci, &ext->master, sizeof(ci)))
1593                 return -EFAULT;
1594
1595         master = intel_engine_lookup_user(i915,
1596                                           ci.engine_class, ci.engine_instance);
1597         if (!master) {
1598                 drm_dbg(&i915->drm,
1599                         "Unrecognised master engine: { class:%u, instance:%u }\n",
1600                         ci.engine_class, ci.engine_instance);
1601                 return -EINVAL;
1602         }
1603
1604         if (get_user(num_bonds, &ext->num_bonds))
1605                 return -EFAULT;
1606
1607         for (n = 0; n < num_bonds; n++) {
1608                 struct intel_engine_cs *bond;
1609
1610                 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
1611                         return -EFAULT;
1612
1613                 bond = intel_engine_lookup_user(i915,
1614                                                 ci.engine_class,
1615                                                 ci.engine_instance);
1616                 if (!bond) {
1617                         drm_dbg(&i915->drm,
1618                                 "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
1619                                 n, ci.engine_class, ci.engine_instance);
1620                         return -EINVAL;
1621                 }
1622
1623                 /*
1624                  * A non-virtual engine has no siblings to choose between, and
1625                  * a submit fence will always be directed to that single engine.
1626                  */
1627                 if (intel_engine_is_virtual(virtual)) {
1628                         err = intel_virtual_engine_attach_bond(virtual,
1629                                                                master,
1630                                                                bond);
1631                         if (err)
1632                                 return err;
1633                 }
1634         }
1635
1636         return 0;
1637 }
1638
1639 static const i915_user_extension_fn set_engines__extensions[] = {
1640         [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
1641         [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
1642 };
1643
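/*
 * set_engines() below replaces the context's engine map with a
 * user-provided array (an empty array switches back to the legacy
 * per-class map). Minimal illustrative sketch, assuming "fd" and
 * "ctx_id" are provided by the caller; after this call, execbuf engine
 * selection is by index into the array:
 *
 *        I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
 *                .engines = {
 *                        { I915_ENGINE_CLASS_RENDER, 0 },
 *                        { I915_ENGINE_CLASS_COPY, 0 },
 *                },
 *        };
 *        struct drm_i915_gem_context_param p = {
 *                .ctx_id = ctx_id,
 *                .param = I915_CONTEXT_PARAM_ENGINES,
 *                .size = sizeof(engines),
 *                .value = (uintptr_t)&engines,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */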
1644 static int
1645 set_engines(struct i915_gem_context *ctx,
1646             const struct drm_i915_gem_context_param *args)
1647 {
1648         struct drm_i915_private *i915 = ctx->i915;
1649         struct i915_context_param_engines __user *user =
1650                 u64_to_user_ptr(args->value);
1651         struct set_engines set = { .ctx = ctx };
1652         unsigned int num_engines, n;
1653         u64 extensions;
1654         int err;
1655
1656         if (!args->size) { /* switch back to legacy user_ring_map */
1657                 if (!i915_gem_context_user_engines(ctx))
1658                         return 0;
1659
1660                 set.engines = default_engines(ctx);
1661                 if (IS_ERR(set.engines))
1662                         return PTR_ERR(set.engines);
1663
1664                 goto replace;
1665         }
1666
1667         BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
1668         if (args->size < sizeof(*user) ||
1669             !IS_ALIGNED(args->size, sizeof(*user->engines))) {
1670                 drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
1671                         args->size);
1672                 return -EINVAL;
1673         }
1674
1675         /*
1676          * Note that I915_EXEC_RING_MASK limits execbuf to only using the
1677          * first 64 engines defined here.
1678          */
1679         num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
1680         set.engines = alloc_engines(num_engines);
1681         if (!set.engines)
1682                 return -ENOMEM;
1683
1684         for (n = 0; n < num_engines; n++) {
1685                 struct i915_engine_class_instance ci;
1686                 struct intel_engine_cs *engine;
1687                 struct intel_context *ce;
1688
1689                 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
1690                         __free_engines(set.engines, n);
1691                         return -EFAULT;
1692                 }
1693
1694                 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
1695                     ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
1696                         set.engines->engines[n] = NULL;
1697                         continue;
1698                 }
1699
1700                 engine = intel_engine_lookup_user(ctx->i915,
1701                                                   ci.engine_class,
1702                                                   ci.engine_instance);
1703                 if (!engine) {
1704                         drm_dbg(&i915->drm,
1705                                 "Invalid engine[%d]: { class:%d, instance:%d }\n",
1706                                 n, ci.engine_class, ci.engine_instance);
1707                         __free_engines(set.engines, n);
1708                         return -ENOENT;
1709                 }
1710
1711                 ce = intel_context_create(engine);
1712                 if (IS_ERR(ce)) {
1713                         __free_engines(set.engines, n);
1714                         return PTR_ERR(ce);
1715                 }
1716
1717                 intel_context_set_gem(ce, ctx);
1718
1719                 set.engines->engines[n] = ce;
1720         }
1721         set.engines->num_engines = num_engines;
1722
1723         err = -EFAULT;
1724         if (!get_user(extensions, &user->extensions))
1725                 err = i915_user_extensions(u64_to_user_ptr(extensions),
1726                                            set_engines__extensions,
1727                                            ARRAY_SIZE(set_engines__extensions),
1728                                            &set);
1729         if (err) {
1730                 free_engines(set.engines);
1731                 return err;
1732         }
1733
1734 replace:
1735         mutex_lock(&ctx->engines_mutex);
1736         if (i915_gem_context_is_closed(ctx)) {
1737                 mutex_unlock(&ctx->engines_mutex);
1738                 free_engines(set.engines);
1739                 return -ENOENT;
1740         }
1741         if (args->size)
1742                 i915_gem_context_set_user_engines(ctx);
1743         else
1744                 i915_gem_context_clear_user_engines(ctx);
1745         set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
1746         mutex_unlock(&ctx->engines_mutex);
1747
1748         /* Keep track of old engine sets for kill_context() */
1749         engines_idle_release(ctx, set.engines);
1750
1751         return 0;
1752 }
1753
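/*
 * get_engines() reports the engine map set above. Note the two-step
 * query pattern: a call with args->size == 0 only returns the required
 * size in bytes (0 if the context still uses the legacy map), and a
 * second call with a sufficiently sized buffer receives the array.
 * Hedged sketch, "fd"/"ctx_id" assumed:
 *
 *        struct drm_i915_gem_context_param p = {
 *                .ctx_id = ctx_id,
 *                .param = I915_CONTEXT_PARAM_ENGINES,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *        // p.size now holds the number of bytes needed; allocate that,
 *        // point p.value at it and repeat the ioctl to read the array.
 */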
1754 static int
1755 get_engines(struct i915_gem_context *ctx,
1756             struct drm_i915_gem_context_param *args)
1757 {
1758         struct i915_context_param_engines __user *user;
1759         struct i915_gem_engines *e;
1760         size_t n, count, size;
1761         bool user_engines;
1762         int err = 0;
1763
1764         e = __context_engines_await(ctx, &user_engines);
1765         if (!e)
1766                 return -ENOENT;
1767
1768         if (!user_engines) {
1769                 i915_sw_fence_complete(&e->fence);
1770                 args->size = 0;
1771                 return 0;
1772         }
1773
1774         count = e->num_engines;
1775
1776         /* Be paranoid in case we have an impedance mismatch */
1777         if (!check_struct_size(user, engines, count, &size)) {
1778                 err = -EINVAL;
1779                 goto err_free;
1780         }
1781         if (overflows_type(size, args->size)) {
1782                 err = -EINVAL;
1783                 goto err_free;
1784         }
1785
1786         if (!args->size) {
1787                 args->size = size;
1788                 goto err_free;
1789         }
1790
1791         if (args->size < size) {
1792                 err = -EINVAL;
1793                 goto err_free;
1794         }
1795
1796         user = u64_to_user_ptr(args->value);
1797         if (put_user(0, &user->extensions)) {
1798                 err = -EFAULT;
1799                 goto err_free;
1800         }
1801
1802         for (n = 0; n < count; n++) {
1803                 struct i915_engine_class_instance ci = {
1804                         .engine_class = I915_ENGINE_CLASS_INVALID,
1805                         .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
1806                 };
1807
1808                 if (e->engines[n]) {
1809                         ci.engine_class = e->engines[n]->engine->uabi_class;
1810                         ci.engine_instance = e->engines[n]->engine->uabi_instance;
1811                 }
1812
1813                 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
1814                         err = -EFAULT;
1815                         goto err_free;
1816                 }
1817         }
1818
1819         args->size = size;
1820
1821 err_free:
1822         i915_sw_fence_complete(&e->fence);
1823         return err;
1824 }
1825
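/*
 * Persistence controls whether outstanding requests of this context may
 * keep running after the context (or file) is closed. Hedged sketch of
 * opting out, assuming "fd"/"ctx_id" come from the caller:
 *
 *        struct drm_i915_gem_context_param p = {
 *                .ctx_id = ctx_id,
 *                .param = I915_CONTEXT_PARAM_PERSISTENCE,
 *                .value = 0,        // 0: work is cancelled on close
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */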
1826 static int
1827 set_persistence(struct i915_gem_context *ctx,
1828                 const struct drm_i915_gem_context_param *args)
1829 {
1830         if (args->size)
1831                 return -EINVAL;
1832
1833         return __context_set_persistence(ctx, args->value);
1834 }
1835
1836 static void __apply_priority(struct intel_context *ce, void *arg)
1837 {
1838         struct i915_gem_context *ctx = arg;
1839
1840         if (!intel_engine_has_timeslices(ce->engine))
1841                 return;
1842
1843         if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
1844                 intel_context_set_use_semaphores(ce);
1845         else
1846                 intel_context_clear_use_semaphores(ce);
1847 }
1848
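/*
 * Priority is a single integer in [I915_CONTEXT_MIN_USER_PRIORITY,
 * I915_CONTEXT_MAX_USER_PRIORITY]; raising it above the default requires
 * CAP_SYS_NICE, and the parameter is rejected outright if the scheduler
 * lacks priority support. Hedged sketch ("fd"/"ctx_id" assumed):
 *
 *        struct drm_i915_gem_context_param p = {
 *                .ctx_id = ctx_id,
 *                .param = I915_CONTEXT_PARAM_PRIORITY,
 *                .value = I915_CONTEXT_MIN_USER_PRIORITY,        // demote to background
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
 */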
1849 static int set_priority(struct i915_gem_context *ctx,
1850                         const struct drm_i915_gem_context_param *args)
1851 {
1852         s64 priority = args->value;
1853
1854         if (args->size)
1855                 return -EINVAL;
1856
1857         if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
1858                 return -ENODEV;
1859
1860         if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
1861             priority < I915_CONTEXT_MIN_USER_PRIORITY)
1862                 return -EINVAL;
1863
1864         if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
1865             !capable(CAP_SYS_NICE))
1866                 return -EPERM;
1867
1868         ctx->sched.priority = priority;
1869         context_apply_all(ctx, __apply_priority, ctx);
1870
1871         return 0;
1872 }
1873
1874 static int ctx_setparam(struct drm_i915_file_private *fpriv,
1875                         struct i915_gem_context *ctx,
1876                         struct drm_i915_gem_context_param *args)
1877 {
1878         int ret = 0;
1879
1880         switch (args->param) {
1881         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1882                 if (args->size)
1883                         ret = -EINVAL;
1884                 else if (args->value)
1885                         i915_gem_context_set_no_error_capture(ctx);
1886                 else
1887                         i915_gem_context_clear_no_error_capture(ctx);
1888                 break;
1889
1890         case I915_CONTEXT_PARAM_BANNABLE:
1891                 if (args->size)
1892                         ret = -EINVAL;
1893                 else if (!capable(CAP_SYS_ADMIN) && !args->value)
1894                         ret = -EPERM;
1895                 else if (args->value)
1896                         i915_gem_context_set_bannable(ctx);
1897                 else
1898                         i915_gem_context_clear_bannable(ctx);
1899                 break;
1900
1901         case I915_CONTEXT_PARAM_RECOVERABLE:
1902                 if (args->size)
1903                         ret = -EINVAL;
1904                 else if (args->value)
1905                         i915_gem_context_set_recoverable(ctx);
1906                 else
1907                         i915_gem_context_clear_recoverable(ctx);
1908                 break;
1909
1910         case I915_CONTEXT_PARAM_PRIORITY:
1911                 ret = set_priority(ctx, args);
1912                 break;
1913
1914         case I915_CONTEXT_PARAM_SSEU:
1915                 ret = set_sseu(ctx, args);
1916                 break;
1917
1918         case I915_CONTEXT_PARAM_VM:
1919                 ret = set_ppgtt(fpriv, ctx, args);
1920                 break;
1921
1922         case I915_CONTEXT_PARAM_ENGINES:
1923                 ret = set_engines(ctx, args);
1924                 break;
1925
1926         case I915_CONTEXT_PARAM_PERSISTENCE:
1927                 ret = set_persistence(ctx, args);
1928                 break;
1929
1930         case I915_CONTEXT_PARAM_NO_ZEROMAP:
1931         case I915_CONTEXT_PARAM_BAN_PERIOD:
1932         case I915_CONTEXT_PARAM_RINGSIZE:
1933         default:
1934                 ret = -EINVAL;
1935                 break;
1936         }
1937
1938         return ret;
1939 }
1940
1941 struct create_ext {
1942         struct i915_gem_context *ctx;
1943         struct drm_i915_file_private *fpriv;
1944 };
1945
1946 static int create_setparam(struct i915_user_extension __user *ext, void *data)
1947 {
1948         struct drm_i915_gem_context_create_ext_setparam local;
1949         const struct create_ext *arg = data;
1950
1951         if (copy_from_user(&local, ext, sizeof(local)))
1952                 return -EFAULT;
1953
1954         if (local.param.ctx_id)
1955                 return -EINVAL;
1956
1957         return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
1958 }
1959
1960 static int invalid_ext(struct i915_user_extension __user *ext, void *data)
1961 {
1962         return -EINVAL;
1963 }
1964
1965 static const i915_user_extension_fn create_extensions[] = {
1966         [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
1967         [I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
1968 };
1969
1970 static bool client_is_banned(struct drm_i915_file_private *file_priv)
1971 {
1972         return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
1973 }
1974
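/*
 * Hedged sketch of context creation with a chained SETPARAM extension
 * (here making the new context non-recoverable at creation time); "fd"
 * is assumed to be an open i915 DRM fd:
 *
 *        struct drm_i915_gem_context_create_ext_setparam ext = {
 *                .base.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
 *                .param = {
 *                        .param = I915_CONTEXT_PARAM_RECOVERABLE,
 *                        .value = 0,
 *                },
 *        };
 *        struct drm_i915_gem_context_create_ext create = {
 *                .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *                .extensions = (uintptr_t)&ext,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 *        // create.ctx_id now names the new context
 */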
1975 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
1976                                   struct drm_file *file)
1977 {
1978         struct drm_i915_private *i915 = to_i915(dev);
1979         struct drm_i915_gem_context_create_ext *args = data;
1980         struct create_ext ext_data;
1981         int ret;
1982         u32 id;
1983
1984         if (!DRIVER_CAPS(i915)->has_logical_contexts)
1985                 return -ENODEV;
1986
1987         if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
1988                 return -EINVAL;
1989
1990         ret = intel_gt_terminally_wedged(&i915->gt);
1991         if (ret)
1992                 return ret;
1993
1994         ext_data.fpriv = file->driver_priv;
1995         if (client_is_banned(ext_data.fpriv)) {
1996                 drm_dbg(&i915->drm,
1997                         "client %s[%d] banned from creating ctx\n",
1998                         current->comm, task_pid_nr(current));
1999                 return -EIO;
2000         }
2001
2002         ext_data.ctx = i915_gem_create_context(i915, args->flags);
2003         if (IS_ERR(ext_data.ctx))
2004                 return PTR_ERR(ext_data.ctx);
2005
2006         if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
2007                 ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
2008                                            create_extensions,
2009                                            ARRAY_SIZE(create_extensions),
2010                                            &ext_data);
2011                 if (ret)
2012                         goto err_ctx;
2013         }
2014
2015         ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
2016         if (ret < 0)
2017                 goto err_ctx;
2018
2019         args->ctx_id = id;
2020         drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
2021
2022         return 0;
2023
2024 err_ctx:
2025         context_close(ext_data.ctx);
2026         return ret;
2027 }
2028
2029 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
2030                                    struct drm_file *file)
2031 {
2032         struct drm_i915_gem_context_destroy *args = data;
2033         struct drm_i915_file_private *file_priv = file->driver_priv;
2034         struct i915_gem_context *ctx;
2035
2036         if (args->pad != 0)
2037                 return -EINVAL;
2038
2039         if (!args->ctx_id)
2040                 return -ENOENT;
2041
2042         ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
2043         if (!ctx)
2044                 return -ENOENT;
2045
2046         context_close(ctx);
2047         return 0;
2048 }
2049
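/*
 * Hedged sketch of querying the render engine's SSEU configuration for a
 * context ("fd"/"ctx_id" assumed; passing size == 0 instead would only
 * report sizeof(struct drm_i915_gem_context_param_sseu)):
 *
 *        struct drm_i915_gem_context_param_sseu sseu = {
 *                .engine = { I915_ENGINE_CLASS_RENDER, 0 },
 *        };
 *        struct drm_i915_gem_context_param p = {
 *                .ctx_id = ctx_id,
 *                .param = I915_CONTEXT_PARAM_SSEU,
 *                .size = sizeof(sseu),
 *                .value = (uintptr_t)&sseu,
 *        };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
 *        // sseu.slice_mask / sseu.subslice_mask now describe the context
 */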
2050 static int get_sseu(struct i915_gem_context *ctx,
2051                     struct drm_i915_gem_context_param *args)
2052 {
2053         struct drm_i915_gem_context_param_sseu user_sseu;
2054         struct intel_context *ce;
2055         unsigned long lookup;
2056         int err;
2057
2058         if (args->size == 0)
2059                 goto out;
2060         else if (args->size < sizeof(user_sseu))
2061                 return -EINVAL;
2062
2063         if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
2064                            sizeof(user_sseu)))
2065                 return -EFAULT;
2066
2067         if (user_sseu.rsvd)
2068                 return -EINVAL;
2069
2070         if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
2071                 return -EINVAL;
2072
2073         lookup = 0;
2074         if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
2075                 lookup |= LOOKUP_USER_INDEX;
2076
2077         ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
2078         if (IS_ERR(ce))
2079                 return PTR_ERR(ce);
2080
2081         err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
2082         if (err) {
2083                 intel_context_put(ce);
2084                 return err;
2085         }
2086
2087         user_sseu.slice_mask = ce->sseu.slice_mask;
2088         user_sseu.subslice_mask = ce->sseu.subslice_mask;
2089         user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
2090         user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
2091
2092         intel_context_unlock_pinned(ce);
2093         intel_context_put(ce);
2094
2095         if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
2096                          sizeof(user_sseu)))
2097                 return -EFAULT;
2098
2099 out:
2100         args->size = sizeof(user_sseu);
2101
2102         return 0;
2103 }
2104
2105 int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
2106                                     struct drm_file *file)
2107 {
2108         struct drm_i915_file_private *file_priv = file->driver_priv;
2109         struct drm_i915_gem_context_param *args = data;
2110         struct i915_gem_context *ctx;
2111         int ret = 0;
2112
2113         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2114         if (!ctx)
2115                 return -ENOENT;
2116
2117         switch (args->param) {
2118         case I915_CONTEXT_PARAM_GTT_SIZE:
2119                 args->size = 0;
2120                 rcu_read_lock();
2121                 if (rcu_access_pointer(ctx->vm))
2122                         args->value = rcu_dereference(ctx->vm)->total;
2123                 else
2124                         args->value = to_i915(dev)->ggtt.vm.total;
2125                 rcu_read_unlock();
2126                 break;
2127
2128         case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
2129                 args->size = 0;
2130                 args->value = i915_gem_context_no_error_capture(ctx);
2131                 break;
2132
2133         case I915_CONTEXT_PARAM_BANNABLE:
2134                 args->size = 0;
2135                 args->value = i915_gem_context_is_bannable(ctx);
2136                 break;
2137
2138         case I915_CONTEXT_PARAM_RECOVERABLE:
2139                 args->size = 0;
2140                 args->value = i915_gem_context_is_recoverable(ctx);
2141                 break;
2142
2143         case I915_CONTEXT_PARAM_PRIORITY:
2144                 args->size = 0;
2145                 args->value = ctx->sched.priority;
2146                 break;
2147
2148         case I915_CONTEXT_PARAM_SSEU:
2149                 ret = get_sseu(ctx, args);
2150                 break;
2151
2152         case I915_CONTEXT_PARAM_VM:
2153                 ret = get_ppgtt(file_priv, ctx, args);
2154                 break;
2155
2156         case I915_CONTEXT_PARAM_ENGINES:
2157                 ret = get_engines(ctx, args);
2158                 break;
2159
2160         case I915_CONTEXT_PARAM_PERSISTENCE:
2161                 args->size = 0;
2162                 args->value = i915_gem_context_is_persistent(ctx);
2163                 break;
2164
2165         case I915_CONTEXT_PARAM_NO_ZEROMAP:
2166         case I915_CONTEXT_PARAM_BAN_PERIOD:
2167         case I915_CONTEXT_PARAM_RINGSIZE:
2168         default:
2169                 ret = -EINVAL;
2170                 break;
2171         }
2172
2173         i915_gem_context_put(ctx);
2174         return ret;
2175 }
2176
2177 int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
2178                                     struct drm_file *file)
2179 {
2180         struct drm_i915_file_private *file_priv = file->driver_priv;
2181         struct drm_i915_gem_context_param *args = data;
2182         struct i915_gem_context *ctx;
2183         int ret;
2184
2185         ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
2186         if (!ctx)
2187                 return -ENOENT;
2188
2189         ret = ctx_setparam(file_priv, ctx, args);
2190
2191         i915_gem_context_put(ctx);
2192         return ret;
2193 }
2194
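/*
 * Hedged sketch of the reset-stats query ("fd"/"ctx_id" assumed); note
 * that only CAP_SYS_ADMIN callers see the global reset_count:
 *
 *        struct drm_i915_reset_stats rs = { .ctx_id = ctx_id };
 *
 *        drmIoctl(fd, DRM_IOCTL_I915_GET_RESET_STATS, &rs);
 *        // rs.batch_active: hangs blamed on this context
 *        // rs.batch_pending: batches of this context caught up in a reset
 */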
2195 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
2196                                        void *data, struct drm_file *file)
2197 {
2198         struct drm_i915_private *i915 = to_i915(dev);
2199         struct drm_i915_reset_stats *args = data;
2200         struct i915_gem_context *ctx;
2201         int ret;
2202
2203         if (args->flags || args->pad)
2204                 return -EINVAL;
2205
2206         ret = -ENOENT;
2207         rcu_read_lock();
2208         ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
2209         if (!ctx)
2210                 goto out;
2211
2212         /*
2213          * We opt for unserialised reads here. This may result in tearing of
2214          * the counters in the extremely unlikely event of a GPU hang on this
2215          * context while we are querying them. If we need that extra layer of
2216          * protection, we should wrap the hangstats with a seqlock.
2217          */
2218
2219         if (capable(CAP_SYS_ADMIN))
2220                 args->reset_count = i915_reset_count(&i915->gpu_error);
2221         else
2222                 args->reset_count = 0;
2223
2224         args->batch_active = atomic_read(&ctx->guilty_count);
2225         args->batch_pending = atomic_read(&ctx->active_count);
2226
2227         ret = 0;
2228 out:
2229         rcu_read_unlock();
2230         return ret;
2231 }
2232
2233 /* GEM context-engines iterator: for_each_gem_engine() */
2234 struct intel_context *
2235 i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
2236 {
2237         const struct i915_gem_engines *e = it->engines;
2238         struct intel_context *ctx;
2239
2240         if (unlikely(!e))
2241                 return NULL;
2242
2243         do {
2244                 if (it->idx >= e->num_engines)
2245                         return NULL;
2246
2247                 ctx = e->engines[it->idx++];
2248         } while (!ctx);
2249
2250         return ctx;
2251 }
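
/*
 * In-kernel callers normally use this through for_each_gem_engine().
 * Minimal sketch (do_something() is a placeholder):
 *
 *        struct i915_gem_engines_iter it;
 *        struct intel_context *ce;
 *
 *        for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
 *                do_something(ce);
 *        i915_gem_context_unlock_engines(ctx);
 */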
2252
2253 static void i915_global_gem_context_shrink(void)
2254 {
2255         kmem_cache_shrink(global.slab_luts);
2256 }
2257
2258 static void i915_global_gem_context_exit(void)
2259 {
2260         kmem_cache_destroy(global.slab_luts);
2261 }
2262
2263 static struct i915_global_gem_context global = { {
2264         .shrink = i915_global_gem_context_shrink,
2265         .exit = i915_global_gem_context_exit,
2266 } };
2267
2268 int __init i915_global_gem_context_init(void)
2269 {
2270         global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
2271         if (!global.slab_luts)
2272                 return -ENOMEM;
2273
2274         i915_global_register(&global.base);
2275         return 0;
2276 }
2277
2278 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2279 #include "selftests/mock_context.c"
2280 #include "selftests/i915_gem_context.c"
2281 #endif