drivers/gpu/drm/i915/i915_request.c
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/dma-fence-array.h>
26 #include <linux/irq_work.h>
27 #include <linux/prefetch.h>
28 #include <linux/sched.h>
29 #include <linux/sched/clock.h>
30 #include <linux/sched/signal.h>
31
32 #include "gem/i915_gem_context.h"
33 #include "gt/intel_context.h"
34
35 #include "i915_active.h"
36 #include "i915_drv.h"
37 #include "i915_globals.h"
38 #include "i915_trace.h"
39 #include "intel_pm.h"
40
41 struct execute_cb {
42         struct list_head link;
43         struct irq_work work;
44         struct i915_sw_fence *fence;
45         void (*hook)(struct i915_request *rq, struct dma_fence *signal);
46         struct i915_request *signal;
47 };
48
49 static struct i915_global_request {
50         struct i915_global base;
51         struct kmem_cache *slab_requests;
52         struct kmem_cache *slab_dependencies;
53         struct kmem_cache *slab_execute_cbs;
54 } global;
55
56 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
57 {
58         return "i915";
59 }
60
61 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
62 {
63         /*
64          * The timeline struct (as part of the ppgtt underneath a context)
65          * may be freed when the request is no longer in use by the GPU.
66          * We could extend the life of a context to beyond that of all
67          * fences, possibly keeping the hw resource around indefinitely,
68          * or we just give them a false name. Since
69          * dma_fence_ops.get_timeline_name is a debug feature, the occasional
70          * lie seems justifiable.
71          */
72         if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
73                 return "signaled";
74
75         return to_request(fence)->gem_context->name ?: "[i915]";
76 }
77
78 static bool i915_fence_signaled(struct dma_fence *fence)
79 {
80         return i915_request_completed(to_request(fence));
81 }
82
83 static bool i915_fence_enable_signaling(struct dma_fence *fence)
84 {
85         return i915_request_enable_breadcrumb(to_request(fence));
86 }
87
88 static signed long i915_fence_wait(struct dma_fence *fence,
89                                    bool interruptible,
90                                    signed long timeout)
91 {
92         return i915_request_wait(to_request(fence),
93                                  interruptible | I915_WAIT_PRIORITY,
94                                  timeout);
95 }
96
97 static void i915_fence_release(struct dma_fence *fence)
98 {
99         struct i915_request *rq = to_request(fence);
100
101         /*
102          * The request is put onto an RCU freelist (i.e. the address
103          * is immediately reused), so mark the fences as being freed now.
104          * Otherwise the debugobjects for the fences are only marked as
105          * freed when the slab cache itself is freed, and so we would get
106          * caught trying to reuse dead objects.
107          */
108         i915_sw_fence_fini(&rq->submit);
109         i915_sw_fence_fini(&rq->semaphore);
110
111         kmem_cache_free(global.slab_requests, rq);
112 }
113
114 const struct dma_fence_ops i915_fence_ops = {
115         .get_driver_name = i915_fence_get_driver_name,
116         .get_timeline_name = i915_fence_get_timeline_name,
117         .enable_signaling = i915_fence_enable_signaling,
118         .signaled = i915_fence_signaled,
119         .wait = i915_fence_wait,
120         .release = i915_fence_release,
121 };
122
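/*
 * irq_work handlers for the execute callbacks: once the signaling request
 * has been submitted to HW, release the waiter's submit fence, optionally
 * invoking the caller-supplied hook with the signaler first.
 */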
123 static void irq_execute_cb(struct irq_work *wrk)
124 {
125         struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
126
127         i915_sw_fence_complete(cb->fence);
128         kmem_cache_free(global.slab_execute_cbs, cb);
129 }
130
131 static void irq_execute_cb_hook(struct irq_work *wrk)
132 {
133         struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
134
135         cb->hook(container_of(cb->fence, struct i915_request, submit),
136                  &cb->signal->fence);
137         i915_request_put(cb->signal);
138
139         irq_execute_cb(wrk);
140 }
141
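/*
 * Fire all execute callbacks attached to @rq now that it has been submitted
 * to HW, deferring the work to irq_work, and reset the list so the callbacks
 * cannot run twice.
 */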
142 static void __notify_execute_cb(struct i915_request *rq)
143 {
144         struct execute_cb *cb;
145
146         lockdep_assert_held(&rq->lock);
147
148         if (list_empty(&rq->execute_cb))
149                 return;
150
151         list_for_each_entry(cb, &rq->execute_cb, link)
152                 irq_work_queue(&cb->work);
153
154         /*
155          * XXX Rollback on __i915_request_unsubmit()
156          *
157          * In the future, perhaps when we have an active time-slicing scheduler,
158          * it will be interesting to unsubmit parallel execution and remove
159          * busywaits from the GPU until their master is restarted. This is
160          * quite hairy, we have to carefully rollback the fence and do a
161          * preempt-to-idle cycle on the target engine, all the while the
162          * master execute_cb may refire.
163          */
164         INIT_LIST_HEAD(&rq->execute_cb);
165 }
166
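/*
 * Detach the request from its client (drm_i915_file_private) list, guarding
 * against a concurrent release of the file: peek at file_priv outside the
 * lock and re-check it under mm.lock before unlinking.
 */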
167 static inline void
168 remove_from_client(struct i915_request *request)
169 {
170         struct drm_i915_file_private *file_priv;
171
172         file_priv = READ_ONCE(request->file_priv);
173         if (!file_priv)
174                 return;
175
176         spin_lock(&file_priv->mm.lock);
177         if (request->file_priv) {
178                 list_del(&request->client_link);
179                 request->file_priv = NULL;
180         }
181         spin_unlock(&file_priv->mm.lock);
182 }
183
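/* Free the singly linked list of error-state capture entries attached to the request. */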
184 static void free_capture_list(struct i915_request *request)
185 {
186         struct i915_capture_list *capture;
187
188         capture = request->capture_list;
189         while (capture) {
190                 struct i915_capture_list *next = capture->next;
191
192                 kfree(capture);
193                 capture = next;
194         }
195 }
196
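/*
 * Retire a completed request: run and detach its active callbacks, unlink it
 * from the engine and client lists, signal its fence and release the
 * breadcrumb, then drop the request's references. Returns false if the
 * request has not yet completed.
 */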
197 static bool i915_request_retire(struct i915_request *rq)
198 {
199         struct i915_active_request *active, *next;
200
201         lockdep_assert_held(&rq->timeline->mutex);
202         if (!i915_request_completed(rq))
203                 return false;
204
205         GEM_TRACE("%s fence %llx:%lld, current %d\n",
206                   rq->engine->name,
207                   rq->fence.context, rq->fence.seqno,
208                   hwsp_seqno(rq));
209
210         GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
211         trace_i915_request_retire(rq);
212
213         /*
214          * We know the GPU must have read the request to have
215          * sent us the seqno + interrupt, so use the position
216          * of the tail of the request to update the last known position
217          * of the GPU head.
218          *
219          * Note this requires that we are always called in request
220          * completion order.
221          */
222         GEM_BUG_ON(!list_is_first(&rq->link, &rq->timeline->requests));
223         rq->ring->head = rq->postfix;
224
225         /*
226          * Walk through the active list, calling retire on each. This allows
227          * objects to track their GPU activity and mark themselves as idle
228          * when their *last* active request is completed (updating state
229          * tracking lists for eviction, active references for GEM, etc).
230          *
231          * As the ->retire() may free the node, we decouple it first and
232          * pass along the auxiliary information (to avoid dereferencing
233          * the node after the callback).
234          */
235         list_for_each_entry_safe(active, next, &rq->active_list, link) {
236                 /*
237                  * In microbenchmarks, or when focusing upon time inside the kernel,
238                  * we may spend an inordinate amount of time simply handling
239                  * the retirement of requests and processing their callbacks.
240                  * Of which, this loop itself is particularly hot due to the
241                  * cache misses when jumping around the list of
242                  * i915_active_request.  So we try to keep this loop as
243                  * streamlined as possible and also prefetch the next
244                  * i915_active_request to try and hide the likely cache miss.
245                  */
246                 prefetchw(next);
247
248                 INIT_LIST_HEAD(&active->link);
249                 RCU_INIT_POINTER(active->request, NULL);
250
251                 active->retire(active, rq);
252         }
253
254         local_irq_disable();
255
256         /*
257          * We only loosely track inflight requests across preemption,
258          * and so we may find ourselves attempting to retire a _completed_
259          * request that we have removed from the HW and put back on a run
260          * queue.
261          */
262         spin_lock(&rq->engine->active.lock);
263         list_del(&rq->sched.link);
264         spin_unlock(&rq->engine->active.lock);
265
266         spin_lock(&rq->lock);
267         i915_request_mark_complete(rq);
268         if (!i915_request_signaled(rq))
269                 dma_fence_signal_locked(&rq->fence);
270         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
271                 i915_request_cancel_breadcrumb(rq);
272         if (i915_request_has_waitboost(rq)) {
273                 GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
274                 atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
275         }
276         if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
277                 set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
278                 __notify_execute_cb(rq);
279         }
280         GEM_BUG_ON(!list_empty(&rq->execute_cb));
281         spin_unlock(&rq->lock);
282
283         local_irq_enable();
284
285         remove_from_client(rq);
286         list_del(&rq->link);
287
288         intel_context_exit(rq->hw_context);
289         intel_context_unpin(rq->hw_context);
290
291         free_capture_list(rq);
292         i915_sched_node_fini(&rq->sched);
293         i915_request_put(rq);
294
295         return true;
296 }
297
298 void i915_request_retire_upto(struct i915_request *rq)
299 {
300         struct intel_timeline * const tl = rq->timeline;
301         struct i915_request *tmp;
302
303         GEM_TRACE("%s fence %llx:%lld, current %d\n",
304                   rq->engine->name,
305                   rq->fence.context, rq->fence.seqno,
306                   hwsp_seqno(rq));
307
308         lockdep_assert_held(&tl->mutex);
309         GEM_BUG_ON(!i915_request_completed(rq));
310
311         do {
312                 tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
313         } while (i915_request_retire(tmp) && tmp != rq);
314 }
315
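/*
 * Arrange for @rq's submit fence (and the optional @hook) to be released only
 * once @signal has actually been submitted to HW, rather than when it
 * completes. If @signal is already active, run the hook immediately; the racy
 * check is repeated under @signal's lock before queuing the callback.
 */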
316 static int
317 __i915_request_await_execution(struct i915_request *rq,
318                                struct i915_request *signal,
319                                void (*hook)(struct i915_request *rq,
320                                             struct dma_fence *signal),
321                                gfp_t gfp)
322 {
323         struct execute_cb *cb;
324
325         if (i915_request_is_active(signal)) {
326                 if (hook)
327                         hook(rq, &signal->fence);
328                 return 0;
329         }
330
331         cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
332         if (!cb)
333                 return -ENOMEM;
334
335         cb->fence = &rq->submit;
336         i915_sw_fence_await(cb->fence);
337         init_irq_work(&cb->work, irq_execute_cb);
338
339         if (hook) {
340                 cb->hook = hook;
341                 cb->signal = i915_request_get(signal);
342                 cb->work.func = irq_execute_cb_hook;
343         }
344
345         spin_lock_irq(&signal->lock);
346         if (i915_request_is_active(signal)) {
347                 if (hook) {
348                         hook(rq, &signal->fence);
349                         i915_request_put(signal);
350                 }
351                 i915_sw_fence_complete(cb->fence);
352                 kmem_cache_free(global.slab_execute_cbs, cb);
353         } else {
354                 list_add_tail(&cb->link, &signal->execute_cb);
355         }
356         spin_unlock_irq(&signal->lock);
357
358         return 0;
359 }
360
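/*
 * Move a ready request onto HW (called with the engine's active.lock held):
 * mark it active, enable breadcrumb signaling if a waiter has asked for it,
 * fire any execute callbacks and emit the final breadcrumb into the ring.
 */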
361 void __i915_request_submit(struct i915_request *request)
362 {
363         struct intel_engine_cs *engine = request->engine;
364
365         GEM_TRACE("%s fence %llx:%lld, current %d\n",
366                   engine->name,
367                   request->fence.context, request->fence.seqno,
368                   hwsp_seqno(request));
369
370         GEM_BUG_ON(!irqs_disabled());
371         lockdep_assert_held(&engine->active.lock);
372
373         if (i915_gem_context_is_banned(request->gem_context))
374                 i915_request_skip(request, -EIO);
375
376         /*
377          * Are we using semaphores when the gpu is already saturated?
378          *
379          * Using semaphores incurs a cost in having the GPU poll a
380          * memory location, busywaiting for it to change. The continual
381          * memory reads can have a noticeable impact on the rest of the
382          * system with the extra bus traffic, stalling the cpu as it too
383          * tries to access memory across the bus (perf stat -e bus-cycles).
384          *
385          * If we installed a semaphore on this request and we only submit
386          * the request after the signaler completed, that indicates the
387          * system is overloaded and using semaphores at this time only
388          * increases the amount of work we are doing. If so, we disable
389          * further use of semaphores until we are idle again, whence we
390          * optimistically try again.
391          */
392         if (request->sched.semaphores &&
393             i915_sw_fence_signaled(&request->semaphore))
394                 engine->saturated |= request->sched.semaphores;
395
396         /* We may be recursing from the signal callback of another i915 fence */
397         spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
398
399         list_move_tail(&request->sched.link, &engine->active.requests);
400
401         GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
402         set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
403
404         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
405             !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
406             !i915_request_enable_breadcrumb(request))
407                 intel_engine_queue_breadcrumbs(engine);
408
409         __notify_execute_cb(request);
410
411         spin_unlock(&request->lock);
412
413         engine->emit_fini_breadcrumb(request,
414                                      request->ring->vaddr + request->postfix);
415
416         engine->serial++;
417
418         trace_i915_request_execute(request);
419 }
420
421 void i915_request_submit(struct i915_request *request)
422 {
423         struct intel_engine_cs *engine = request->engine;
424         unsigned long flags;
425
426         /* Will be called from irq-context when using foreign fences. */
427         spin_lock_irqsave(&engine->active.lock, flags);
428
429         __i915_request_submit(request);
430
431         spin_unlock_irqrestore(&engine->active.lock, flags);
432 }
433
434 void __i915_request_unsubmit(struct i915_request *request)
435 {
436         struct intel_engine_cs *engine = request->engine;
437
438         GEM_TRACE("%s fence %llx:%lld, current %d\n",
439                   engine->name,
440                   request->fence.context, request->fence.seqno,
441                   hwsp_seqno(request));
442
443         GEM_BUG_ON(!irqs_disabled());
444         lockdep_assert_held(&engine->active.lock);
445
446         /*
447          * Only unwind in reverse order, required so that the per-context list
448          * is kept in seqno/ring order.
449          */
450
451         /* We may be recursing from the signal callback of another i915 fence */
452         spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
453
454         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
455                 i915_request_cancel_breadcrumb(request);
456
457         GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
458         clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
459
460         spin_unlock(&request->lock);
461
462         /* We've already spun, don't charge on resubmitting. */
463         if (request->sched.semaphores && i915_request_started(request)) {
464                 request->sched.attr.priority |= I915_PRIORITY_NOSEMAPHORE;
465                 request->sched.semaphores = 0;
466         }
467
468         /*
469          * We don't need to wake_up any waiters on request->execute, they
470          * will get woken by any other event or us re-adding this request
471          * to the engine timeline (__i915_request_submit()). The waiters
472          * should be quite adept at finding that the request now has a new
473          * global_seqno from the one they went to sleep on.
474          */
475 }
476
477 void i915_request_unsubmit(struct i915_request *request)
478 {
479         struct intel_engine_cs *engine = request->engine;
480         unsigned long flags;
481
482         /* Will be called from irq-context when using foreign fences. */
483         spin_lock_irqsave(&engine->active.lock, flags);
484
485         __i915_request_unsubmit(request);
486
487         spin_unlock_irqrestore(&engine->active.lock, flags);
488 }
489
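/*
 * Notifier for the submit fence: once all of the request's submission
 * dependencies have been resolved, hand the request to the engine backend
 * (under RCU to serialise against wedging); drop the fence's reference to
 * the request when the fence itself is freed.
 */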
490 static int __i915_sw_fence_call
491 submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
492 {
493         struct i915_request *request =
494                 container_of(fence, typeof(*request), submit);
495
496         switch (state) {
497         case FENCE_COMPLETE:
498                 trace_i915_request_submit(request);
499
500                 if (unlikely(fence->error))
501                         i915_request_skip(request, fence->error);
502
503                 /*
504                  * We need to serialize use of the submit_request() callback
505                  * with its hotplugging performed during an emergency
506                  * i915_gem_set_wedged().  We use the RCU mechanism to mark the
507                  * critical section in order to force i915_gem_set_wedged() to
508                  * wait until the submit_request() is completed before
509                  * proceeding.
510                  */
511                 rcu_read_lock();
512                 request->engine->submit_request(request);
513                 rcu_read_unlock();
514                 break;
515
516         case FENCE_FREE:
517                 i915_request_put(request);
518                 break;
519         }
520
521         return NOTIFY_DONE;
522 }
523
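/*
 * Notifier for the semaphore fence: once the fences that this request's
 * semaphores wait upon have signaled, bump its priority with
 * I915_PRIORITY_NOSEMAPHORE so it is no longer treated as waiting on a
 * semaphore.
 */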
524 static int __i915_sw_fence_call
525 semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
526 {
527         struct i915_request *request =
528                 container_of(fence, typeof(*request), semaphore);
529
530         switch (state) {
531         case FENCE_COMPLETE:
532                 i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
533                 break;
534
535         case FENCE_FREE:
536                 i915_request_put(request);
537                 break;
538         }
539
540         return NOTIFY_DONE;
541 }
542
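/*
 * Retire completed requests along the timeline, in submission order, stopping
 * at the first request that has not yet completed.
 */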
543 static void retire_requests(struct intel_timeline *tl)
544 {
545         struct i915_request *rq, *rn;
546
547         list_for_each_entry_safe(rq, rn, &tl->requests, link)
548                 if (!i915_request_retire(rq))
549                         break;
550 }
551
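/*
 * Slow path for request allocation: retire the oldest request, retry the slab
 * allocation, and if that still fails wait for an RCU grace period and flush
 * the retirement queue before a final, blocking attempt.
 */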
552 static noinline struct i915_request *
553 request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
554 {
555         struct i915_request *rq;
556
557         if (list_empty(&tl->requests))
558                 goto out;
559
560         if (!gfpflags_allow_blocking(gfp))
561                 goto out;
562
563         /* Move our oldest request to the slab-cache (if not in use!) */
564         rq = list_first_entry(&tl->requests, typeof(*rq), link);
565         i915_request_retire(rq);
566
567         rq = kmem_cache_alloc(global.slab_requests,
568                               gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
569         if (rq)
570                 return rq;
571
572         /* Ratelimit ourselves to prevent oom from malicious clients */
573         rq = list_last_entry(&tl->requests, typeof(*rq), link);
574         cond_synchronize_rcu(rq->rcustate);
575
576         /* Retire our old requests in the hope that we free some */
577         retire_requests(tl);
578
579 out:
580         return kmem_cache_alloc(global.slab_requests, gfp);
581 }
582
583 struct i915_request *
584 __i915_request_create(struct intel_context *ce, gfp_t gfp)
585 {
586         struct intel_timeline *tl = ce->timeline;
587         struct i915_request *rq;
588         u32 seqno;
589         int ret;
590
591         might_sleep_if(gfpflags_allow_blocking(gfp));
592
593         /* Check that the caller provided an already pinned context */
594         __intel_context_pin(ce);
595
596         /*
597          * Beware: Dragons be flying overhead.
598          *
599          * We use RCU to look up requests in flight. The lookups may
600          * race with the request being allocated from the slab freelist.
601          * That is, the request we are writing to here may be in the process
602          * of being read by __i915_active_request_get_rcu(). As such,
603          * we have to be very careful when overwriting the contents. During
604          * the RCU lookup, we chase the request->engine pointer,
605          * read the request->global_seqno and increment the reference count.
606          *
607          * The reference count is incremented atomically. If it is zero,
608          * the lookup knows the request is unallocated and complete. Otherwise,
609          * it is either still in use, or has been reallocated and reset
610          * with dma_fence_init(). This increment is safe for release as we
611          * check that the request we have a reference to and matches the active
612          * check that the request we have a reference to matches the active
613          *
614          * Before we increment the refcount, we chase the request->engine
615          * pointer. We must not call kmem_cache_zalloc() or else we set
616          * that pointer to NULL and cause a crash during the lookup. If
617          * we see the request is completed (based on the value of the
618          * old engine and seqno), the lookup is complete and reports NULL.
619          * If we decide the request is not completed (new engine or seqno),
620          * then we grab a reference and double check that it is still the
621          * active request - which it won't be, and so we restart the lookup.
622          *
623          * Do not use kmem_cache_zalloc() here!
624          */
625         rq = kmem_cache_alloc(global.slab_requests,
626                               gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
627         if (unlikely(!rq)) {
628                 rq = request_alloc_slow(tl, gfp);
629                 if (!rq) {
630                         ret = -ENOMEM;
631                         goto err_unreserve;
632                 }
633         }
634
635         ret = intel_timeline_get_seqno(tl, rq, &seqno);
636         if (ret)
637                 goto err_free;
638
639         rq->i915 = ce->engine->i915;
640         rq->hw_context = ce;
641         rq->gem_context = ce->gem_context;
642         rq->engine = ce->engine;
643         rq->ring = ce->ring;
644         rq->timeline = tl;
645         rq->hwsp_seqno = tl->hwsp_seqno;
646         rq->hwsp_cacheline = tl->hwsp_cacheline;
647         rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
648
649         spin_lock_init(&rq->lock);
650         dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
651                        tl->fence_context, seqno);
652
653         /* We bump the ref for the fence chain */
654         i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
655         i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);
656
657         i915_sched_node_init(&rq->sched);
658
659         /* No zalloc, must clear what we need by hand */
660         rq->file_priv = NULL;
661         rq->batch = NULL;
662         rq->capture_list = NULL;
663         rq->flags = 0;
664         rq->execution_mask = ALL_ENGINES;
665
666         INIT_LIST_HEAD(&rq->active_list);
667         INIT_LIST_HEAD(&rq->execute_cb);
668
669         /*
670          * Reserve space in the ring buffer for all the commands required to
671          * eventually emit this request. This is to guarantee that the
672          * i915_request_add() call can't fail. Note that the reserve may need
673          * to be redone if the request is not actually submitted straight
674          * away, e.g. because a GPU scheduler has deferred it.
675          *
676          * Note that due to how we add reserved_space to intel_ring_begin()
677          * we need to double our request to ensure that if we need to wrap
678          * around inside i915_request_add() there is sufficient space at
679          * the beginning of the ring as well.
680          */
681         rq->reserved_space =
682                 2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
683
684         /*
685          * Record the position of the start of the request so that
686          * should we detect the updated seqno part-way through the
687          * GPU processing the request, we never over-estimate the
688          * position of the head.
689          */
690         rq->head = rq->ring->emit;
691
692         ret = rq->engine->request_alloc(rq);
693         if (ret)
694                 goto err_unwind;
695
696         rq->infix = rq->ring->emit; /* end of header; start of user payload */
697
698         intel_context_mark_active(ce);
699         return rq;
700
701 err_unwind:
702         ce->ring->emit = rq->head;
703
704         /* Make sure we didn't add ourselves to external state before freeing */
705         GEM_BUG_ON(!list_empty(&rq->active_list));
706         GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
707         GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
708
709 err_free:
710         kmem_cache_free(global.slab_requests, rq);
711 err_unreserve:
712         intel_context_unpin(ce);
713         return ERR_PTR(ret);
714 }
715
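/*
 * Allocate a request on the context's timeline: takes the timeline mutex
 * (left held and pinned on success), opportunistically retires the oldest
 * completed request, and hands the context's active reference over to the
 * new request.
 */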
716 struct i915_request *
717 i915_request_create(struct intel_context *ce)
718 {
719         struct i915_request *rq;
720         struct intel_timeline *tl;
721
722         tl = intel_context_timeline_lock(ce);
723         if (IS_ERR(tl))
724                 return ERR_CAST(tl);
725
726         /* Move our oldest request to the slab-cache (if not in use!) */
727         rq = list_first_entry(&tl->requests, typeof(*rq), link);
728         if (!list_is_last(&rq->link, &tl->requests))
729                 i915_request_retire(rq);
730
731         intel_context_enter(ce);
732         rq = __i915_request_create(ce, GFP_KERNEL);
733         intel_context_exit(ce); /* active reference transferred to request */
734         if (IS_ERR(rq))
735                 goto err_unlock;
736
737         /* Check that we do not interrupt ourselves with a new request */
738         rq->cookie = lockdep_pin_lock(&tl->mutex);
739
740         return rq;
741
742 err_unlock:
743         intel_context_timeline_unlock(tl);
744         return rq;
745 }
746
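/*
 * Order @rq after the request that precedes @signal on @signal's timeline
 * (if any, and if not already tracked as synced), so that @rq is not
 * submitted until @signal has started executing.
 */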
747 static int
748 i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
749 {
750         if (list_is_first(&signal->link, &signal->timeline->requests))
751                 return 0;
752
753         signal = list_prev_entry(signal, link);
754         if (intel_timeline_sync_is_later(rq->timeline, &signal->fence))
755                 return 0;
756
757         return i915_sw_fence_await_dma_fence(&rq->submit,
758                                              &signal->fence, 0,
759                                              I915_FENCE_GFP);
760 }
761
762 static intel_engine_mask_t
763 already_busywaiting(struct i915_request *rq)
764 {
765         /*
766          * Polling a semaphore causes bus traffic, delaying other users of
767          * both the GPU and CPU. We want to limit the impact on others,
768          * while taking advantage of early submission to reduce GPU
769          * latency. Therefore we restrict ourselves to not using more
770          * than one semaphore from each source, and not using a semaphore
771          * if we have detected the engine is saturated (i.e. would not be
772          * submitted early and cause bus traffic reading an already passed
773          * semaphore).
774          *
775          * See the are-we-too-late? check in __i915_request_submit().
776          */
777         return rq->sched.semaphores | rq->engine->saturated;
778 }
779
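/*
 * Emit a MI_SEMAPHORE_WAIT into @to's ring, polling @from's breadcrumb in the
 * HWSP so that the wait is performed by the GPU rather than the CPU. Falls
 * back to an ordinary software fence wait if this engine is already
 * busywaiting on a semaphore from the signaling engine.
 */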
780 static int
781 emit_semaphore_wait(struct i915_request *to,
782                     struct i915_request *from,
783                     gfp_t gfp)
784 {
785         u32 hwsp_offset;
786         u32 *cs;
787         int err;
788
789         GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
790         GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
791
792         /* Just emit the first semaphore we see as request space is limited. */
793         if (already_busywaiting(to) & from->engine->mask)
794                 return i915_sw_fence_await_dma_fence(&to->submit,
795                                                      &from->fence, 0,
796                                                      I915_FENCE_GFP);
797
798         err = i915_request_await_start(to, from);
799         if (err < 0)
800                 return err;
801
802         /* Only submit our spinner after the signaler is running! */
803         err = __i915_request_await_execution(to, from, NULL, gfp);
804         if (err)
805                 return err;
806
807         /* We need to pin the signaler's HWSP until we are finished reading. */
808         err = intel_timeline_read_hwsp(from, to, &hwsp_offset);
809         if (err)
810                 return err;
811
812         cs = intel_ring_begin(to, 4);
813         if (IS_ERR(cs))
814                 return PTR_ERR(cs);
815
816         /*
817          * Using greater-than-or-equal here means we have to worry
818          * about seqno wraparound. To side step that issue, we swap
819          * the timeline HWSP upon wrapping, so that everyone listening
820          * for the old (pre-wrap) values does not see the much smaller
821          * (post-wrap) values than they were expecting (and so wait
822          * forever).
823          */
824         *cs++ = MI_SEMAPHORE_WAIT |
825                 MI_SEMAPHORE_GLOBAL_GTT |
826                 MI_SEMAPHORE_POLL |
827                 MI_SEMAPHORE_SAD_GTE_SDD;
828         *cs++ = from->fence.seqno;
829         *cs++ = hwsp_offset;
830         *cs++ = 0;
831
832         intel_ring_advance(to, cs);
833         to->sched.semaphores |= from->engine->mask;
834         to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
835         return 0;
836 }
837
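/*
 * Make @to depend upon @from: record the scheduler dependency, then pick the
 * cheapest wait - a submit-fence when both run on the same engine, a GPU
 * semaphore when the engine supports them and the context's priority allows,
 * or a CPU-signaled fence wait otherwise.
 */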
838 static int
839 i915_request_await_request(struct i915_request *to, struct i915_request *from)
840 {
841         int ret;
842
843         GEM_BUG_ON(to == from);
844         GEM_BUG_ON(to->timeline == from->timeline);
845
846         if (i915_request_completed(from))
847                 return 0;
848
849         if (to->engine->schedule) {
850                 ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
851                 if (ret < 0)
852                         return ret;
853         }
854
855         if (to->engine == from->engine) {
856                 ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
857                                                        &from->submit,
858                                                        I915_FENCE_GFP);
859         } else if (intel_engine_has_semaphores(to->engine) &&
860                    to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
861                 ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
862         } else {
863                 ret = i915_sw_fence_await_dma_fence(&to->submit,
864                                                     &from->fence, 0,
865                                                     I915_FENCE_GFP);
866         }
867         if (ret < 0)
868                 return ret;
869
870         if (to->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN) {
871                 ret = i915_sw_fence_await_dma_fence(&to->semaphore,
872                                                     &from->fence, 0,
873                                                     I915_FENCE_GFP);
874                 if (ret < 0)
875                         return ret;
876         }
877
878         return 0;
879 }
880
881 int
882 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
883 {
884         struct dma_fence **child = &fence;
885         unsigned int nchild = 1;
886         int ret;
887
888         /*
889          * Note that if the fence-array was created in signal-on-any mode,
890          * we should *not* decompose it into its individual fences. However,
891          * we don't currently store which mode the fence-array is operating
892          * in. Fortunately, the only user of signal-on-any is private to
893          * amdgpu and we should not see any incoming fence-array from
894          * sync-file being in signal-on-any mode.
895          */
896         if (dma_fence_is_array(fence)) {
897                 struct dma_fence_array *array = to_dma_fence_array(fence);
898
899                 child = array->fences;
900                 nchild = array->num_fences;
901                 GEM_BUG_ON(!nchild);
902         }
903
904         do {
905                 fence = *child++;
906                 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
907                         continue;
908
909                 /*
910                  * Requests on the same timeline are explicitly ordered, along
911                  * with their dependencies, by i915_request_add() which ensures
912                  * that requests are submitted in-order through each ring.
913                  */
914                 if (fence->context == rq->fence.context)
915                         continue;
916
917                 /* Squash repeated waits to the same timelines */
918                 if (fence->context &&
919                     intel_timeline_sync_is_later(rq->timeline, fence))
920                         continue;
921
922                 if (dma_fence_is_i915(fence))
923                         ret = i915_request_await_request(rq, to_request(fence));
924                 else
925                         ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
926                                                             I915_FENCE_TIMEOUT,
927                                                             I915_FENCE_GFP);
928                 if (ret < 0)
929                         return ret;
930
931                 /* Record the latest fence used against each timeline */
932                 if (fence->context)
933                         intel_timeline_sync_set(rq->timeline, fence);
934         } while (--nchild);
935
936         return 0;
937 }
938
939 int
940 i915_request_await_execution(struct i915_request *rq,
941                              struct dma_fence *fence,
942                              void (*hook)(struct i915_request *rq,
943                                           struct dma_fence *signal))
944 {
945         struct dma_fence **child = &fence;
946         unsigned int nchild = 1;
947         int ret;
948
949         if (dma_fence_is_array(fence)) {
950                 struct dma_fence_array *array = to_dma_fence_array(fence);
951
952                 /* XXX Error for signal-on-any fence arrays */
953
954                 child = array->fences;
955                 nchild = array->num_fences;
956                 GEM_BUG_ON(!nchild);
957         }
958
959         do {
960                 fence = *child++;
961                 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
962                         continue;
963
964                 /*
965                  * We don't squash repeated fence dependencies here as we
966                  * want to run our callback in all cases.
967                  */
968
969                 if (dma_fence_is_i915(fence))
970                         ret = __i915_request_await_execution(rq,
971                                                              to_request(fence),
972                                                              hook,
973                                                              I915_FENCE_GFP);
974                 else
975                         ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
976                                                             I915_FENCE_TIMEOUT,
977                                                             GFP_KERNEL);
978                 if (ret < 0)
979                         return ret;
980         } while (--nchild);
981
982         return 0;
983 }
984
985 /**
986  * i915_request_await_object - set this request to (async) wait upon a bo
987  * @to: request we are wishing to use
988  * @obj: object which may be in use on another ring.
989  * @write: whether the wait is on behalf of a writer
990  *
991  * This code is meant to abstract object synchronization with the GPU.
992  * Conceptually we serialise writes between engines inside the GPU.
993  * We only allow one engine to write into a buffer at any time, but
994  * multiple readers. To ensure each has a coherent view of memory, we must:
995  *
996  * - If there is an outstanding write request to the object, the new
997  *   request must wait for it to complete (either CPU or in hw, requests
998  *   on the same ring will be naturally ordered).
999  *
1000  * - If we are a write request (pending_write_domain is set), the new
1001  *   request must wait for outstanding read requests to complete.
1002  *
1003  * Returns 0 if successful, else propagates up the lower layer error.
1004  */
1005 int
1006 i915_request_await_object(struct i915_request *to,
1007                           struct drm_i915_gem_object *obj,
1008                           bool write)
1009 {
1010         struct dma_fence *excl;
1011         int ret = 0;
1012
1013         if (write) {
1014                 struct dma_fence **shared;
1015                 unsigned int count, i;
1016
1017                 ret = dma_resv_get_fences_rcu(obj->base.resv,
1018                                                         &excl, &count, &shared);
1019                 if (ret)
1020                         return ret;
1021
1022                 for (i = 0; i < count; i++) {
1023                         ret = i915_request_await_dma_fence(to, shared[i]);
1024                         if (ret)
1025                                 break;
1026
1027                         dma_fence_put(shared[i]);
1028                 }
1029
1030                 for (; i < count; i++)
1031                         dma_fence_put(shared[i]);
1032                 kfree(shared);
1033         } else {
1034                 excl = dma_resv_get_excl_rcu(obj->base.resv);
1035         }
1036
1037         if (excl) {
1038                 if (ret == 0)
1039                         ret = i915_request_await_dma_fence(to, excl);
1040
1041                 dma_fence_put(excl);
1042         }
1043
1044         return ret;
1045 }
1046
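/*
 * Cancel the payload of a request that must no longer run: record @error on
 * its fence and overwrite the user commands already emitted into the ring
 * with zeroes (no-ops), leaving only the breadcrumb so that the request still
 * signals completion.
 */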
1047 void i915_request_skip(struct i915_request *rq, int error)
1048 {
1049         void *vaddr = rq->ring->vaddr;
1050         u32 head;
1051
1052         GEM_BUG_ON(!IS_ERR_VALUE((long)error));
1053         dma_fence_set_error(&rq->fence, error);
1054
1055         if (rq->infix == rq->postfix)
1056                 return;
1057
1058         /*
1059          * As this request likely depends on state from the lost
1060          * context, clear out all the user operations leaving the
1061          * breadcrumb at the end (so we get the fence notifications).
1062          */
1063         head = rq->infix;
1064         if (rq->postfix < head) {
1065                 memset(vaddr + head, 0, rq->ring->size - head);
1066                 head = 0;
1067         }
1068         memset(vaddr + head, 0, rq->postfix - head);
1069         rq->infix = rq->postfix;
1070 }
1071
1072 static struct i915_request *
1073 __i915_request_add_to_timeline(struct i915_request *rq)
1074 {
1075         struct intel_timeline *timeline = rq->timeline;
1076         struct i915_request *prev;
1077
1078         /*
1079          * Dependency tracking and request ordering along the timeline
1080          * is special cased so that we can eliminate redundant ordering
1081          * operations while building the request (we know that the timeline
1082          * itself is ordered, and here we guarantee it).
1083          *
1084          * As we know we will need to emit tracking along the timeline,
1085          * we embed the hooks into our request struct -- at the cost of
1086          * having to have specialised no-allocation interfaces (which will
1087          * be beneficial elsewhere).
1088          *
1089          * A second benefit to open-coding i915_request_await_request is
1090          * that we can apply a slight variant of the rules specialised
1091          * for timelines that jump between engines (such as virtual engines).
1092          * If we consider the case of a virtual engine, we must emit a dma-fence
1093          * to prevent scheduling of the second request until the first is
1094          * complete (to maximise our greedy late load balancing) and this
1095          * precludes optimising to use semaphore serialisation of a single
1096          * timeline across engines.
1097          */
1098         prev = rcu_dereference_protected(timeline->last_request.request,
1099                                          lockdep_is_held(&timeline->mutex));
1100         if (prev && !i915_request_completed(prev)) {
1101                 if (is_power_of_2(prev->engine->mask | rq->engine->mask))
1102                         i915_sw_fence_await_sw_fence(&rq->submit,
1103                                                      &prev->submit,
1104                                                      &rq->submitq);
1105                 else
1106                         __i915_sw_fence_await_dma_fence(&rq->submit,
1107                                                         &prev->fence,
1108                                                         &rq->dmaq);
1109                 if (rq->engine->schedule)
1110                         __i915_sched_node_add_dependency(&rq->sched,
1111                                                          &prev->sched,
1112                                                          &rq->dep,
1113                                                          0);
1114         }
1115
1116         list_add_tail(&rq->link, &timeline->requests);
1117
1118         /*
1119          * Make sure that no request gazumped us - if it was allocated after
1120          * our i915_request_alloc() and called __i915_request_add() before
1121          * us, the timeline will hold its seqno which is later than ours.
1122          */
1123         GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
1124         __i915_active_request_set(&timeline->last_request, rq);
1125
1126         return prev;
1127 }
1128
1129 /*
1130  * NB: This function is not allowed to fail. Doing so would mean the
1131  * request is not being tracked for completion but the work itself is
1132  * going to happen on the hardware. This would be a Bad Thing(tm).
1133  */
1134 struct i915_request *__i915_request_commit(struct i915_request *rq)
1135 {
1136         struct intel_engine_cs *engine = rq->engine;
1137         struct intel_ring *ring = rq->ring;
1138         u32 *cs;
1139
1140         GEM_TRACE("%s fence %llx:%lld\n",
1141                   engine->name, rq->fence.context, rq->fence.seqno);
1142
1143         /*
1144          * To ensure that this call will not fail, space for its emissions
1145          * should already have been reserved in the ring buffer. Let the ring
1146          * know that it is time to use that space up.
1147          */
1148         GEM_BUG_ON(rq->reserved_space > ring->space);
1149         rq->reserved_space = 0;
1150         rq->emitted_jiffies = jiffies;
1151
1152         /*
1153          * Record the position of the start of the breadcrumb so that
1154          * should we detect the updated seqno part-way through the
1155          * GPU processing the request, we never over-estimate the
1156          * position of the ring's HEAD.
1157          */
1158         cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
1159         GEM_BUG_ON(IS_ERR(cs));
1160         rq->postfix = intel_ring_offset(rq, cs);
1161
1162         return __i915_request_add_to_timeline(rq);
1163 }
1164
1165 void __i915_request_queue(struct i915_request *rq,
1166                           const struct i915_sched_attr *attr)
1167 {
1168         /*
1169          * Let the backend know a new request has arrived that may need
1170          * to adjust the existing execution schedule due to a high priority
1171          * request - i.e. we may want to preempt the current request in order
1172          * to run a high priority dependency chain *before* we can execute this
1173          * request.
1174          *
1175          * This is called before the request is ready to run so that we can
1176          * decide whether to preempt the entire chain so that it is ready to
1177          * run at the earliest possible convenience.
1178          */
1179         i915_sw_fence_commit(&rq->semaphore);
1180         if (attr && rq->engine->schedule)
1181                 rq->engine->schedule(rq, attr);
1182         i915_sw_fence_commit(&rq->submit);
1183 }
1184
1185 void i915_request_add(struct i915_request *rq)
1186 {
1187         struct i915_sched_attr attr = rq->gem_context->sched;
1188         struct intel_timeline * const tl = rq->timeline;
1189         struct i915_request *prev;
1190
1191         lockdep_assert_held(&tl->mutex);
1192         lockdep_unpin_lock(&tl->mutex, rq->cookie);
1193
1194         trace_i915_request_add(rq);
1195
1196         prev = __i915_request_commit(rq);
1197
1198         /*
1199          * Boost actual workloads past semaphores!
1200          *
1201          * With semaphores we spin on one engine waiting for another,
1202          * simply to reduce the latency of starting our work when
1203          * the signaler completes. However, if there is any other
1204          * work that we could be doing on this engine instead, that
1205          * is better utilisation and will reduce the overall duration
1206          * of the current work. To avoid PI boosting a semaphore
1207          * far in the distant past over useful work, we keep a history
1208          * of any semaphore use along our dependency chain.
1209          */
1210         if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
1211                 attr.priority |= I915_PRIORITY_NOSEMAPHORE;
1212
1213         /*
1214          * Boost priorities to new clients (new request flows).
1215          *
1216          * Allow interactive/synchronous clients to jump ahead of
1217          * the bulk clients. (FQ_CODEL)
1218          */
1219         if (list_empty(&rq->sched.signalers_list))
1220                 attr.priority |= I915_PRIORITY_WAIT;
1221
1222         local_bh_disable();
1223         __i915_request_queue(rq, &attr);
1224         local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
1225
1226         /*
1227          * In typical scenarios, we do not expect the previous request on
1228          * the timeline to be still tracked by timeline->last_request if it
1229          * has been completed. If the completed request is still here, that
1230          * implies that request retirement is a long way behind submission,
1231          * suggesting that we haven't been retiring frequently enough from
1232          * the combination of retire-before-alloc, waiters and the background
1233          * retirement worker. So if the last request on this timeline was
1234          * already completed, do a catch up pass, flushing the retirement queue
1235          * up to this client. Since we have now moved the heaviest operations
1236          * during retirement onto secondary workers, such as freeing objects
1237          * or contexts, retiring a bunch of requests is mostly list management
1238          * (and cache misses), and so we should not be overly penalizing this
1239          * client by performing excess work, though we may still be performing
1240          * work on behalf of others -- but instead we should benefit from
1241          * improved resource management. (Well, that's the theory at least.)
1242          */
1243         if (prev && i915_request_completed(prev) && prev->timeline == tl)
1244                 i915_request_retire_upto(prev);
1245
1246         mutex_unlock(&tl->mutex);
1247 }
1248
1249 static unsigned long local_clock_us(unsigned int *cpu)
1250 {
1251         unsigned long t;
1252
1253         /*
1254          * Cheaply and approximately convert from nanoseconds to microseconds.
1255          * The result and subsequent calculations are also defined in the same
1256          * approximate microseconds units. The principal source of timing
1257          * error here is from the simple truncation.
1258          *
1259          * Note that local_clock() is only defined wrt the current CPU;
1260          * the comparisons are no longer valid if we switch CPUs. Instead of
1261          * blocking preemption for the entire busywait, we can detect the CPU
1262          * switch and use that as indicator of system load and a reason to
1263          * stop busywaiting, see busywait_stop().
1264          */
1265         *cpu = get_cpu();
1266         t = local_clock() >> 10;
1267         put_cpu();
1268
1269         return t;
1270 }
1271
1272 static bool busywait_stop(unsigned long timeout, unsigned int cpu)
1273 {
1274         unsigned int this_cpu;
1275
1276         if (time_after(local_clock_us(&this_cpu), timeout))
1277                 return true;
1278
1279         return this_cpu != cpu;
1280 }
1281
1282 static bool __i915_spin_request(const struct i915_request * const rq,
1283                                 int state, unsigned long timeout_us)
1284 {
1285         unsigned int cpu;
1286
1287         /*
1288          * Only wait for the request if we know it is likely to complete.
1289          *
1290          * We don't track the timestamps around requests, nor the average
1291          * request length, so we do not have a good indicator that this
1292          * request will complete within the timeout. What we do know is the
1293          * order in which requests are executed by the context and so we can
1294          * tell if the request has been started. If the request is not even
1295          * running yet, it is a fair assumption that it will not complete
1296          * within our relatively short timeout.
1297          */
1298         if (!i915_request_is_running(rq))
1299                 return false;
1300
1301         /*
1302          * When waiting for high frequency requests, e.g. during synchronous
1303          * rendering split between the CPU and GPU, the finite amount of time
1304          * required to set up the irq and wait upon it limits the response
1305          * rate. By busywaiting on the request completion for a short while we
1306          * can service the high frequency waits as quickly as possible. However,
1307          * if it is a slow request, we want to sleep as quickly as possible.
1308          * The tradeoff between waiting and sleeping is roughly the time it
1309          * takes to sleep on a request, on the order of a microsecond.
1310          */
1311
1312         timeout_us += local_clock_us(&cpu);
1313         do {
1314                 if (i915_request_completed(rq))
1315                         return true;
1316
1317                 if (signal_pending_state(state, current))
1318                         break;
1319
1320                 if (busywait_stop(timeout_us, cpu))
1321                         break;
1322
1323                 cpu_relax();
1324         } while (!need_resched());
1325
1326         return false;
1327 }
1328
1329 struct request_wait {
1330         struct dma_fence_cb cb;
1331         struct task_struct *tsk;
1332 };
1333
1334 static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
1335 {
1336         struct request_wait *wait = container_of(cb, typeof(*wait), cb);
1337
1338         wake_up_process(wait->tsk);
1339 }
1340
1341 /**
1342  * i915_request_wait - wait until execution of request has finished
1343  * @rq: the request to wait upon
1344  * @flags: how to wait
1345  * @timeout: how long to wait in jiffies
1346  *
1347  * i915_request_wait() waits for the request to be completed, for a
1348  * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
1349  * unbounded wait).
1350  *
1351  * Returns the remaining time (in jiffies) if the request completed, which may
1352  * be zero or -ETIME if the request is unfinished after the timeout expires.
1353  * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
1354  * pending before the request completes.
1355  */
1356 long i915_request_wait(struct i915_request *rq,
1357                        unsigned int flags,
1358                        long timeout)
1359 {
1360         const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1361                 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1362         struct request_wait wait;
1363
1364         might_sleep();
1365         GEM_BUG_ON(timeout < 0);
1366
1367         if (dma_fence_is_signaled(&rq->fence))
1368                 return timeout;
1369
1370         if (!timeout)
1371                 return -ETIME;
1372
1373         trace_i915_request_wait_begin(rq, flags);
1374
1375         /*
1376          * We must never wait on the GPU while holding a lock as we
1377          * may need to perform a GPU reset. So while we don't need to
1378          * serialise wait/reset with an explicit lock, we do want
1379          * lockdep to detect potential dependency cycles.
1380          */
1381         mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
1382
1383         /*
1384          * Optimistic spin before touching IRQs.
1385          *
1386          * We may use a rather large value here to offset the penalty of
1387          * switching away from the active task. Frequently, the client will
1388          * wait upon an old swapbuffer to throttle itself to remain within a
1389          * frame of the gpu. If the client is running in lockstep with the gpu,
1390          * then it should not be waiting long at all, and a sleep now will incur
1391          * extra scheduler latency in producing the next frame. To try to
1392          * avoid adding the cost of enabling/disabling the interrupt to the
1393          * short wait, we first spin to see if the request would have completed
1394          * in the time taken to set up the interrupt.
1395          *
1396          * We need up to 5us to enable the irq, and up to 20us to hide the
1397          * scheduler latency of a context switch, ignoring the secondary
1398          * impacts from a context switch such as cache eviction.
1399          *
1400          * The scheme used for low-latency IO is called "hybrid interrupt
1401          * polling". The suggestion there is to sleep until just before you
1402          * expect to be woken by the device interrupt and then poll for its
1403          * completion. That requires having a good predictor for the request
1404          * duration, which we currently lack.
1405          */
1406         if (CONFIG_DRM_I915_SPIN_REQUEST &&
1407             __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
1408                 dma_fence_signal(&rq->fence);
1409                 goto out;
1410         }
1411
1412         /*
1413          * This client is about to stall waiting for the GPU. In many cases
1414          * this is undesirable and limits the throughput of the system, as
1415          * many clients cannot continue processing user input/output whilst
1416          * blocked. RPS autotuning may take tens of milliseconds to respond
1417          * to the GPU load and thus incurs additional latency for the client.
1418          * We can circumvent that by promoting the GPU frequency to maximum
1419          * before we sleep. This makes the GPU throttle up much more quickly
1420          * (good for benchmarks and user experience, e.g. window animations),
1421          * but at a cost of spending more power processing the workload
1422          * (bad for battery).
1423          */
1424         if (flags & I915_WAIT_PRIORITY) {
1425                 if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
1426                         gen6_rps_boost(rq);
1427                 i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
1428         }
1429
1430         wait.tsk = current;
1431         if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
1432                 goto out;
1433
1434         for (;;) {
1435                 set_current_state(state);
1436
1437                 if (i915_request_completed(rq)) {
1438                         dma_fence_signal(&rq->fence);
1439                         break;
1440                 }
1441
1442                 if (signal_pending_state(state, current)) {
1443                         timeout = -ERESTARTSYS;
1444                         break;
1445                 }
1446
1447                 if (!timeout) {
1448                         timeout = -ETIME;
1449                         break;
1450                 }
1451
1452                 timeout = io_schedule_timeout(timeout);
1453         }
1454         __set_current_state(TASK_RUNNING);
1455
1456         dma_fence_remove_callback(&rq->fence, &wait.cb);
1457
1458 out:
1459         mutex_release(&rq->engine->gt->reset.mutex.dep_map, 0, _THIS_IP_);
1460         trace_i915_request_wait_end(rq);
1461         return timeout;
1462 }
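
/*
 * Illustrative sketch (editorial addition, not part of the driver): a
 * minimal, hypothetical caller of i915_request_wait() showing how the
 * return value documented above is typically interpreted. The helper name
 * and the 100ms budget are arbitrary choices made for this example only.
 */
static inline int example_wait_for_request(struct i915_request *rq)
{
	long remaining;

	/* Wait interruptibly, bounded by an arbitrary 100ms budget. */
	remaining = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
				      msecs_to_jiffies(100));
	if (remaining < 0) /* -ETIME on timeout, -ERESTARTSYS/-EINTR on a signal */
		return remaining;

	/* Completed in time; 'remaining' jiffies (possibly 0) were unused. */
	return 0;
}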
1463
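/*
 * Walk the active timelines and retire any completed requests, dropping the
 * timelines lock around each per-timeline retirement pass (the list element
 * is pinned via its active_count so iteration can resume safely afterwards).
 * Returns true if any timelines remain on the active list when done.
 */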
1464 bool i915_retire_requests(struct drm_i915_private *i915)
1465 {
1466         struct intel_gt_timelines *timelines = &i915->gt.timelines;
1467         struct intel_timeline *tl, *tn;
1468         unsigned long flags;
1469         LIST_HEAD(free);
1470
1471         spin_lock_irqsave(&timelines->lock, flags);
1472         list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
1473                 if (!mutex_trylock(&tl->mutex))
1474                         continue;
1475
1476                 intel_timeline_get(tl);
1477                 GEM_BUG_ON(!tl->active_count);
1478                 tl->active_count++; /* pin the list element */
1479                 spin_unlock_irqrestore(&timelines->lock, flags);
1480
1481                 retire_requests(tl);
1482
1483                 spin_lock_irqsave(&timelines->lock, flags);
1484
1485                 /* Resume iteration after dropping lock */
1486                 list_safe_reset_next(tl, tn, link);
1487                 if (!--tl->active_count)
1488                         list_del(&tl->link);
1489
1490                 mutex_unlock(&tl->mutex);
1491
1492                 /* Defer the final release to after the spinlock */
1493                 if (refcount_dec_and_test(&tl->kref.refcount)) {
1494                         GEM_BUG_ON(tl->active_count);
1495                         list_add(&tl->link, &free);
1496                 }
1497         }
1498         spin_unlock_irqrestore(&timelines->lock, flags);
1499
1500         list_for_each_entry_safe(tl, tn, &free, link)
1501                 __intel_timeline_free(&tl->kref);
1502
1503         return !list_empty(&timelines->active_list);
1504 }
1505
1506 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1507 #include "selftests/mock_request.c"
1508 #include "selftests/i915_request.c"
1509 #endif
1510
1511 static void i915_global_request_shrink(void)
1512 {
1513         kmem_cache_shrink(global.slab_dependencies);
1514         kmem_cache_shrink(global.slab_execute_cbs);
1515         kmem_cache_shrink(global.slab_requests);
1516 }
1517
1518 static void i915_global_request_exit(void)
1519 {
1520         kmem_cache_destroy(global.slab_dependencies);
1521         kmem_cache_destroy(global.slab_execute_cbs);
1522         kmem_cache_destroy(global.slab_requests);
1523 }
1524
1525 static struct i915_global_request global = { {
1526         .shrink = i915_global_request_shrink,
1527         .exit = i915_global_request_exit,
1528 } };
1529
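/*
 * Create the slab caches used for requests, execute callbacks and
 * dependencies, and register them with the i915 globals machinery so they
 * are shrunk and destroyed via the hooks above. On failure, any caches
 * created so far are destroyed and -ENOMEM is returned.
 */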
1530 int __init i915_global_request_init(void)
1531 {
1532         global.slab_requests = KMEM_CACHE(i915_request,
1533                                           SLAB_HWCACHE_ALIGN |
1534                                           SLAB_RECLAIM_ACCOUNT |
1535                                           SLAB_TYPESAFE_BY_RCU);
1536         if (!global.slab_requests)
1537                 return -ENOMEM;
1538
1539         global.slab_execute_cbs = KMEM_CACHE(execute_cb,
1540                                              SLAB_HWCACHE_ALIGN |
1541                                              SLAB_RECLAIM_ACCOUNT |
1542                                              SLAB_TYPESAFE_BY_RCU);
1543         if (!global.slab_execute_cbs)
1544                 goto err_requests;
1545
1546         global.slab_dependencies = KMEM_CACHE(i915_dependency,
1547                                               SLAB_HWCACHE_ALIGN |
1548                                               SLAB_RECLAIM_ACCOUNT);
1549         if (!global.slab_dependencies)
1550                 goto err_execute_cbs;
1551
1552         i915_global_register(&global.base);
1553         return 0;
1554
1555 err_execute_cbs:
1556         kmem_cache_destroy(global.slab_execute_cbs);
1557 err_requests:
1558         kmem_cache_destroy(global.slab_requests);
1559         return -ENOMEM;
1560 }