1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/prefetch.h>
26 #include <linux/dma-fence-array.h>
27 #include <linux/sched.h>
28 #include <linux/sched/clock.h>
29 #include <linux/sched/signal.h>
30
31 #include "i915_drv.h"
32 #include "i915_active.h"
33 #include "i915_reset.h"
34
35 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
36 {
37         return "i915";
38 }
39
40 static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
41 {
42         /*
43          * The timeline struct (as part of the ppgtt underneath a context)
44          * may be freed when the request is no longer in use by the GPU.
45          * We could extend the life of a context to beyond that of all
46          * fences, possibly keeping the hw resource around indefinitely,
47          * or we just give them a false name. Since
48          * or we could just give them a false name. Since
49          * lie seems justifiable.
50          */
51         if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
52                 return "signaled";
53
54         return to_request(fence)->timeline->name;
55 }
56
57 static bool i915_fence_signaled(struct dma_fence *fence)
58 {
59         return i915_request_completed(to_request(fence));
60 }
61
62 static bool i915_fence_enable_signaling(struct dma_fence *fence)
63 {
64         return i915_request_enable_breadcrumb(to_request(fence));
65 }
66
67 static signed long i915_fence_wait(struct dma_fence *fence,
68                                    bool interruptible,
69                                    signed long timeout)
70 {
71         return i915_request_wait(to_request(fence), interruptible, timeout);
72 }
73
74 static void i915_fence_release(struct dma_fence *fence)
75 {
76         struct i915_request *rq = to_request(fence);
77
78         /*
79          * The request is put onto an RCU freelist (i.e. the address
80          * is immediately reused), so mark the fences as being freed now.
81          * Otherwise the debugobjects for the fences are only marked as
82          * freed when the slab cache itself is freed, and so we would get
83          * caught trying to reuse dead objects.
84          */
85         i915_sw_fence_fini(&rq->submit);
86
87         kmem_cache_free(rq->i915->requests, rq);
88 }
89
90 const struct dma_fence_ops i915_fence_ops = {
91         .get_driver_name = i915_fence_get_driver_name,
92         .get_timeline_name = i915_fence_get_timeline_name,
93         .enable_signaling = i915_fence_enable_signaling,
94         .signaled = i915_fence_signaled,
95         .wait = i915_fence_wait,
96         .release = i915_fence_release,
97 };
98
99 static inline void
100 i915_request_remove_from_client(struct i915_request *request)
101 {
102         struct drm_i915_file_private *file_priv;
103
104         file_priv = request->file_priv;
105         if (!file_priv)
106                 return;
107
108         spin_lock(&file_priv->mm.lock);
109         if (request->file_priv) {
110                 list_del(&request->client_link);
111                 request->file_priv = NULL;
112         }
113         spin_unlock(&file_priv->mm.lock);
114 }
115
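/*
 * Track the number of requests in flight on the GT. The first request
 * wakes the device up (unpark); retiring the last outstanding request
 * allows it to be parked again.
 */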
116 static void reserve_gt(struct drm_i915_private *i915)
117 {
118         if (!i915->gt.active_requests++)
119                 i915_gem_unpark(i915);
120 }
121
122 static void unreserve_gt(struct drm_i915_private *i915)
123 {
124         GEM_BUG_ON(!i915->gt.active_requests);
125         if (!--i915->gt.active_requests)
126                 i915_gem_park(i915);
127 }
128
129 static void advance_ring(struct i915_request *request)
130 {
131         struct intel_ring *ring = request->ring;
132         unsigned int tail;
133
134         /*
135          * We know the GPU must have read the request to have
136          * sent us the seqno + interrupt, so use the position
137          * of the tail of the request to update the last known position
138          * of the GPU head.
139          *
140          * Note this requires that we are always called in request
141          * completion order.
142          */
143         GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
144         if (list_is_last(&request->ring_link, &ring->request_list)) {
145                 /*
146                  * We may race here with execlists resubmitting this request
147                  * as we retire it. The resubmission will move the ring->tail
148                  * forwards (to request->wa_tail). We either read the
149                  * current value that was written to hw, or the value that
150                  * is just about to be. Either works, if we miss the last two
151                  * noops - they are safe to be replayed on a reset.
152                  */
153                 GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
154                 tail = READ_ONCE(request->tail);
155                 list_del(&ring->active_link);
156         } else {
157                 tail = request->postfix;
158         }
159         list_del_init(&request->ring_link);
160
161         ring->head = tail;
162 }
163
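/* Release the error-capture list, if any, attached to this request. */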
164 static void free_capture_list(struct i915_request *request)
165 {
166         struct i915_capture_list *capture;
167
168         capture = request->capture_list;
169         while (capture) {
170                 struct i915_capture_list *next = capture->next;
171
172                 kfree(capture);
173                 capture = next;
174         }
175 }
176
177 static void __retire_engine_request(struct intel_engine_cs *engine,
178                                     struct i915_request *rq)
179 {
180         GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d:%d\n",
181                   __func__, engine->name,
182                   rq->fence.context, rq->fence.seqno,
183                   rq->global_seqno,
184                   hwsp_seqno(rq),
185                   intel_engine_get_seqno(engine));
186
187         GEM_BUG_ON(!i915_request_completed(rq));
188
189         local_irq_disable();
190
191         spin_lock(&engine->timeline.lock);
192         GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
193         list_del_init(&rq->link);
194         spin_unlock(&engine->timeline.lock);
195
196         spin_lock(&rq->lock);
197         i915_request_mark_complete(rq);
198         if (!i915_request_signaled(rq))
199                 dma_fence_signal_locked(&rq->fence);
200         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
201                 i915_request_cancel_breadcrumb(rq);
202         if (rq->waitboost) {
203                 GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
204                 atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
205         }
206         spin_unlock(&rq->lock);
207
208         local_irq_enable();
209
210         /*
211          * The backing object for the context is done after switching to the
212          * *next* context. Therefore we cannot retire the previous context until
213          * the next context has already started running. However, since we
214          * cannot take the required locks at i915_request_submit(), we
215          * defer the unpinning of the active context to now, the retirement of
216          * the subsequent request.
217          */
218         if (engine->last_retired_context)
219                 intel_context_unpin(engine->last_retired_context);
220         engine->last_retired_context = rq->hw_context;
221 }
222
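/*
 * Retire, in submission order, every request on the engine timeline up to
 * and including @rq.
 */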
223 static void __retire_engine_upto(struct intel_engine_cs *engine,
224                                  struct i915_request *rq)
225 {
226         struct i915_request *tmp;
227
228         if (list_empty(&rq->link))
229                 return;
230
231         do {
232                 tmp = list_first_entry(&engine->timeline.requests,
233                                        typeof(*tmp), link);
234
235                 GEM_BUG_ON(tmp->engine != engine);
236                 __retire_engine_request(engine, tmp);
237         } while (tmp != rq);
238 }
239
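/*
 * Unwind the bookkeeping for a single completed request: advance the ring
 * past it, run the retirement callbacks on its active trackers, drop the
 * client link and context pin, and finally release our reference.
 */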
240 static void i915_request_retire(struct i915_request *request)
241 {
242         struct i915_active_request *active, *next;
243
244         GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
245                   request->engine->name,
246                   request->fence.context, request->fence.seqno,
247                   request->global_seqno,
248                   hwsp_seqno(request),
249                   intel_engine_get_seqno(request->engine));
250
251         lockdep_assert_held(&request->i915->drm.struct_mutex);
252         GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
253         GEM_BUG_ON(!i915_request_completed(request));
254
255         trace_i915_request_retire(request);
256
257         advance_ring(request);
258         free_capture_list(request);
259
260         /*
261          * Walk through the active list, calling retire on each. This allows
262          * objects to track their GPU activity and mark themselves as idle
263          * when their *last* active request is completed (updating state
264          * tracking lists for eviction, active references for GEM, etc).
265          *
266          * As the ->retire() may free the node, we decouple it first and
267          * pass along the auxiliary information (to avoid dereferencing
268          * the node after the callback).
269          */
270         list_for_each_entry_safe(active, next, &request->active_list, link) {
271                 /*
272          * In microbenchmarks, or when focusing upon time inside the kernel,
273                  * we may spend an inordinate amount of time simply handling
274                  * the retirement of requests and processing their callbacks.
275                  * Of which, this loop itself is particularly hot due to the
276                  * cache misses when jumping around the list of
277                  * i915_active_request.  So we try to keep this loop as
278                  * streamlined as possible and also prefetch the next
279                  * i915_active_request to try and hide the likely cache miss.
280                  */
281                 prefetchw(next);
282
283                 INIT_LIST_HEAD(&active->link);
284                 RCU_INIT_POINTER(active->request, NULL);
285
286                 active->retire(active, request);
287         }
288
289         i915_request_remove_from_client(request);
290
291         /* Retirement decays the ban score as it is a sign of ctx progress */
292         atomic_dec_if_positive(&request->gem_context->ban_score);
293         intel_context_unpin(request->hw_context);
294
295         __retire_engine_upto(request->engine, request);
296
297         unreserve_gt(request->i915);
298
299         i915_sched_node_fini(request->i915, &request->sched);
300         i915_request_put(request);
301 }
302
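/*
 * i915_request_retire_upto - retire @rq and every earlier request on the
 * same ring. Call under struct_mutex with @rq already completed.
 */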
303 void i915_request_retire_upto(struct i915_request *rq)
304 {
305         struct intel_ring *ring = rq->ring;
306         struct i915_request *tmp;
307
308         GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
309                   rq->engine->name,
310                   rq->fence.context, rq->fence.seqno,
311                   rq->global_seqno,
312                   hwsp_seqno(rq),
313                   intel_engine_get_seqno(rq->engine));
314
315         lockdep_assert_held(&rq->i915->drm.struct_mutex);
316         GEM_BUG_ON(!i915_request_completed(rq));
317
318         if (list_empty(&rq->ring_link))
319                 return;
320
321         do {
322                 tmp = list_first_entry(&ring->request_list,
323                                        typeof(*tmp), ring_link);
324
325                 i915_request_retire(tmp);
326         } while (tmp != rq);
327 }
328
329 static u32 timeline_get_seqno(struct i915_timeline *tl)
330 {
331         return tl->seqno += 1 + tl->has_initial_breadcrumb;
332 }
333
334 static void move_to_timeline(struct i915_request *request,
335                              struct i915_timeline *timeline)
336 {
337         GEM_BUG_ON(request->timeline == &request->engine->timeline);
338         lockdep_assert_held(&request->engine->timeline.lock);
339
340         spin_lock(&request->timeline->lock);
341         list_move_tail(&request->link, &timeline->requests);
342         spin_unlock(&request->timeline->lock);
343 }
344
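/*
 * A global_seqno of 0 means "not yet submitted", so skip over zero when
 * the engine timeline's seqno wraps around.
 */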
345 static u32 next_global_seqno(struct i915_timeline *tl)
346 {
347         if (!++tl->seqno)
348                 ++tl->seqno;
349         return tl->seqno;
350 }
351
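/*
 * __i915_request_submit - hand a request to the engine backend.
 *
 * Called with engine->timeline.lock held and irqs disabled. Assigns the
 * next global seqno, arms the breadcrumb if signaling has been requested,
 * writes the final breadcrumb commands into the ring and moves the request
 * from its per-context timeline onto the engine timeline.
 */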
352 void __i915_request_submit(struct i915_request *request)
353 {
354         struct intel_engine_cs *engine = request->engine;
355         u32 seqno;
356
357         GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d:%d\n",
358                   engine->name,
359                   request->fence.context, request->fence.seqno,
360                   engine->timeline.seqno + 1,
361                   hwsp_seqno(request),
362                   intel_engine_get_seqno(engine));
363
364         GEM_BUG_ON(!irqs_disabled());
365         lockdep_assert_held(&engine->timeline.lock);
366
367         GEM_BUG_ON(request->global_seqno);
368
369         seqno = next_global_seqno(&engine->timeline);
370         GEM_BUG_ON(!seqno);
371         GEM_BUG_ON(intel_engine_signaled(engine, seqno));
372
373         /* We may be recursing from the signal callback of another i915 fence */
374         spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
375         GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
376         set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
377         request->global_seqno = seqno;
378         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
379             !i915_request_enable_breadcrumb(request))
380                 intel_engine_queue_breadcrumbs(engine);
381         spin_unlock(&request->lock);
382
383         engine->emit_fini_breadcrumb(request,
384                                      request->ring->vaddr + request->postfix);
385
386         /* Transfer from per-context onto the global per-engine timeline */
387         move_to_timeline(request, &engine->timeline);
388
389         trace_i915_request_execute(request);
390 }
391
392 void i915_request_submit(struct i915_request *request)
393 {
394         struct intel_engine_cs *engine = request->engine;
395         unsigned long flags;
396
397         /* Will be called from irq-context when using foreign fences. */
398         spin_lock_irqsave(&engine->timeline.lock, flags);
399
400         __i915_request_submit(request);
401
402         spin_unlock_irqrestore(&engine->timeline.lock, flags);
403 }
404
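/*
 * __i915_request_unsubmit - the reverse of __i915_request_submit().
 *
 * Called under the same engine->timeline.lock, it rolls back the global
 * seqno, cancels any armed breadcrumb and returns the request to its
 * per-context timeline so that it can be resubmitted later.
 */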
405 void __i915_request_unsubmit(struct i915_request *request)
406 {
407         struct intel_engine_cs *engine = request->engine;
408
409         GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d:%d\n",
410                   engine->name,
411                   request->fence.context, request->fence.seqno,
412                   request->global_seqno,
413                   hwsp_seqno(request),
414                   intel_engine_get_seqno(engine));
415
416         GEM_BUG_ON(!irqs_disabled());
417         lockdep_assert_held(&engine->timeline.lock);
418
419         /*
420          * Only unwind in reverse order, required so that the per-context list
421          * is kept in seqno/ring order.
422          */
423         GEM_BUG_ON(!request->global_seqno);
424         GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
425         GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
426         engine->timeline.seqno--;
427
428         /* We may be recursing from the signal callback of another i915 fence */
429         spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
430         request->global_seqno = 0;
431         if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
432                 i915_request_cancel_breadcrumb(request);
433         GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
434         clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
435         spin_unlock(&request->lock);
436
437         /* Transfer back from the global per-engine timeline to per-context */
438         move_to_timeline(request, request->timeline);
439
440         /*
441          * We don't need to wake_up any waiters on request->execute; they
442          * will get woken by any other event or by us re-adding this request
443          * to the engine timeline (__i915_request_submit()). The waiters
444          * should be quite adept at finding that the request now has a new
445          * global_seqno compared to the one they went to sleep on.
446          */
447 }
448
449 void i915_request_unsubmit(struct i915_request *request)
450 {
451         struct intel_engine_cs *engine = request->engine;
452         unsigned long flags;
453
454         /* Will be called from irq-context when using foreign fences. */
455         spin_lock_irqsave(&engine->timeline.lock, flags);
456
457         __i915_request_unsubmit(request);
458
459         spin_unlock_irqrestore(&engine->timeline.lock, flags);
460 }
461
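/*
 * Notify callback for the request's submit fence: once all of the request's
 * dependencies have been signaled, pass it on to the engine backend for
 * execution; on FENCE_FREE, drop the reference taken for the fence chain at
 * allocation.
 */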
462 static int __i915_sw_fence_call
463 submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
464 {
465         struct i915_request *request =
466                 container_of(fence, typeof(*request), submit);
467
468         switch (state) {
469         case FENCE_COMPLETE:
470                 trace_i915_request_submit(request);
471                 /*
472                  * We need to serialize use of the submit_request() callback
473                  * with its hotplugging performed during an emergency
474                  * i915_gem_set_wedged().  We use the RCU mechanism to mark the
475                  * critical section in order to force i915_gem_set_wedged() to
476                  * wait until the submit_request() is completed before
477                  * proceeding.
478                  */
479                 rcu_read_lock();
480                 request->engine->submit_request(request);
481                 rcu_read_unlock();
482                 break;
483
484         case FENCE_FREE:
485                 i915_request_put(request);
486                 break;
487         }
488
489         return NOTIFY_DONE;
490 }
491
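/* Retire, in order, every completed request still on @ring. */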
492 static void ring_retire_requests(struct intel_ring *ring)
493 {
494         struct i915_request *rq, *rn;
495
496         list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
497                 if (!i915_request_completed(rq))
498                         break;
499
500                 i915_request_retire(rq);
501         }
502 }
503
504 static noinline struct i915_request *
505 i915_request_alloc_slow(struct intel_context *ce)
506 {
507         struct intel_ring *ring = ce->ring;
508         struct i915_request *rq;
509
510         if (list_empty(&ring->request_list))
511                 goto out;
512
513         /* Ratelimit ourselves to prevent oom from malicious clients */
514         rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
515         cond_synchronize_rcu(rq->rcustate);
516
517         /* Retire our old requests in the hope that we free some */
518         ring_retire_requests(ring);
519
520 out:
521         return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
522 }
523
524 static int add_timeline_barrier(struct i915_request *rq)
525 {
526         return i915_request_await_active_request(rq, &rq->timeline->barrier);
527 }
528
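/*
 * A rough sketch of the typical request lifecycle (illustrative only;
 * error handling and the actual commands are elided):
 *
 *	rq = i915_request_alloc(engine, ctx);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	cs = intel_ring_begin(rq, 4);
 *	... emit up to 4 dwords of commands into cs ...
 *	intel_ring_advance(rq, cs);
 *
 *	i915_request_add(rq);
 */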
529 /**
530  * i915_request_alloc - allocate a request structure
531  *
532  * @engine: engine that we wish to issue the request on.
533  * @ctx: context that the request will be associated with.
534  *
535  * Returns a pointer to the allocated request if successful,
536  * or an error code if not.
537  */
538 struct i915_request *
539 i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
540 {
541         struct drm_i915_private *i915 = engine->i915;
542         struct i915_request *rq;
543         struct intel_context *ce;
544         int ret;
545
546         lockdep_assert_held(&i915->drm.struct_mutex);
547
548         /*
549          * Preempt contexts are reserved for exclusive use to inject a
550          * preemption context switch. They are never to be used for any trivial
551          * request!
552          */
553         GEM_BUG_ON(ctx == i915->preempt_context);
554
555         /*
556          * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
557          * EIO if the GPU is already wedged.
558          */
559         if (i915_terminally_wedged(&i915->gpu_error))
560                 return ERR_PTR(-EIO);
561
562         /*
563          * Pinning the contexts may generate requests in order to acquire
564          * GGTT space, so do this first before we reserve a seqno for
565          * ourselves.
566          */
567         ce = intel_context_pin(ctx, engine);
568         if (IS_ERR(ce))
569                 return ERR_CAST(ce);
570
571         reserve_gt(i915);
572
573         /* Move our oldest request to the slab-cache (if not in use!) */
574         rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
575         if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
576             i915_request_completed(rq))
577                 i915_request_retire(rq);
578
579         /*
580          * Beware: Dragons be flying overhead.
581          *
582          * We use RCU to look up requests in flight. The lookups may
583          * race with the request being allocated from the slab freelist.
584          * That is the request we are writing to here, may be in the process
585          * of being read by __i915_active_request_get_rcu(). As such,
586          * we have to be very careful when overwriting the contents. During
587          * the RCU lookup, we chase the request->engine pointer,
588          * read the request->global_seqno and increment the reference count.
589          *
590          * The reference count is incremented atomically. If it is zero,
591          * the lookup knows the request is unallocated and complete. Otherwise,
592          * it is either still in use, or has been reallocated and reset
593          * with dma_fence_init(). This increment is safe for release as we
594          * check that the request we have a reference to matches the active
595          * request.
596          *
597          * Before we increment the refcount, we chase the request->engine
598          * pointer. We must not call kmem_cache_zalloc() or else we set
599          * that pointer to NULL and cause a crash during the lookup. If
600          * we see the request is completed (based on the value of the
601          * old engine and seqno), the lookup is complete and reports NULL.
602          * If we decide the request is not completed (new engine or seqno),
603          * then we grab a reference and double check that it is still the
604          * active request - which it won't be - and restart the lookup.
605          *
606          * Do not use kmem_cache_zalloc() here!
607          */
608         rq = kmem_cache_alloc(i915->requests,
609                               GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
610         if (unlikely(!rq)) {
611                 rq = i915_request_alloc_slow(ce);
612                 if (!rq) {
613                         ret = -ENOMEM;
614                         goto err_unreserve;
615                 }
616         }
617
618         rq->rcustate = get_state_synchronize_rcu();
619
620         INIT_LIST_HEAD(&rq->active_list);
621         rq->i915 = i915;
622         rq->engine = engine;
623         rq->gem_context = ctx;
624         rq->hw_context = ce;
625         rq->ring = ce->ring;
626         rq->timeline = ce->ring->timeline;
627         GEM_BUG_ON(rq->timeline == &engine->timeline);
628         rq->hwsp_seqno = rq->timeline->hwsp_seqno;
629
630         spin_lock_init(&rq->lock);
631         dma_fence_init(&rq->fence,
632                        &i915_fence_ops,
633                        &rq->lock,
634                        rq->timeline->fence_context,
635                        timeline_get_seqno(rq->timeline));
636
637         /* We bump the ref for the fence chain */
638         i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
639
640         i915_sched_node_init(&rq->sched);
641
642         /* No zalloc, must clear what we need by hand */
643         rq->global_seqno = 0;
644         rq->file_priv = NULL;
645         rq->batch = NULL;
646         rq->capture_list = NULL;
647         rq->waitboost = false;
648
649         /*
650          * Reserve space in the ring buffer for all the commands required to
651          * eventually emit this request. This is to guarantee that the
652          * i915_request_add() call can't fail. Note that the reserve may need
653          * to be redone if the request is not actually submitted straight
654          * away, e.g. because a GPU scheduler has deferred it.
655          *
656          * Note that due to how we add reserved_space to intel_ring_begin()
657          * we need to double our reservation to ensure that if we need to wrap
658          * around inside i915_request_add() there is sufficient space at
659          * the beginning of the ring as well.
660          */
661         rq->reserved_space = 2 * engine->emit_fini_breadcrumb_dw * sizeof(u32);
662
663         /*
664          * Record the position of the start of the request so that
665          * should we detect the updated seqno part-way through the
666          * GPU processing the request, we never over-estimate the
667          * position of the head.
668          */
669         rq->head = rq->ring->emit;
670
671         ret = add_timeline_barrier(rq);
672         if (ret)
673                 goto err_unwind;
674
675         ret = engine->request_alloc(rq);
676         if (ret)
677                 goto err_unwind;
678
679         /* Keep a second pin for the dual retirement along engine and ring */
680         __intel_context_pin(ce);
681
682         rq->infix = rq->ring->emit; /* end of header; start of user payload */
683
684         /* Check that we didn't interrupt ourselves with a new request */
685         GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
686         return rq;
687
688 err_unwind:
689         ce->ring->emit = rq->head;
690
691         /* Make sure we didn't add ourselves to external state before freeing */
692         GEM_BUG_ON(!list_empty(&rq->active_list));
693         GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
694         GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
695
696         kmem_cache_free(i915->requests, rq);
697 err_unreserve:
698         unreserve_gt(i915);
699         intel_context_unpin(ce);
700         return ERR_PTR(ret);
701 }
702
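/*
 * Order @to after @from: record the scheduler dependency, then either
 * chain the submit fences (same engine, so execution order is enough) or
 * wait on @from's dma-fence before submitting @to (different engines).
 */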
703 static int
704 i915_request_await_request(struct i915_request *to, struct i915_request *from)
705 {
706         int ret;
707
708         GEM_BUG_ON(to == from);
709         GEM_BUG_ON(to->timeline == from->timeline);
710
711         if (i915_request_completed(from))
712                 return 0;
713
714         if (to->engine->schedule) {
715                 ret = i915_sched_node_add_dependency(to->i915,
716                                                      &to->sched,
717                                                      &from->sched);
718                 if (ret < 0)
719                         return ret;
720         }
721
722         if (to->engine == from->engine) {
723                 ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
724                                                        &from->submit,
725                                                        I915_FENCE_GFP);
726         } else {
727                 ret = i915_sw_fence_await_dma_fence(&to->submit,
728                                                     &from->fence, 0,
729                                                     I915_FENCE_GFP);
730         }
731
732         return ret < 0 ? ret : 0;
733 }
734
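/*
 * Order @rq after an arbitrary dma-fence: fence arrays are decomposed into
 * their children, and fences that are already signaled, on the same
 * context, or already tracked on this timeline are skipped.
 */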
735 int
736 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
737 {
738         struct dma_fence **child = &fence;
739         unsigned int nchild = 1;
740         int ret;
741
742         /*
743          * Note that if the fence-array was created in signal-on-any mode,
744          * we should *not* decompose it into its individual fences. However,
745          * we don't currently store which mode the fence-array is operating
746          * in. Fortunately, the only user of signal-on-any is private to
747          * amdgpu and we should not see any incoming fence-array from
748          * sync-file being in signal-on-any mode.
749          */
750         if (dma_fence_is_array(fence)) {
751                 struct dma_fence_array *array = to_dma_fence_array(fence);
752
753                 child = array->fences;
754                 nchild = array->num_fences;
755                 GEM_BUG_ON(!nchild);
756         }
757
758         do {
759                 fence = *child++;
760                 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
761                         continue;
762
763                 /*
764                  * Requests on the same timeline are explicitly ordered, along
765                  * with their dependencies, by i915_request_add() which ensures
766                  * that requests are submitted in-order through each ring.
767                  */
768                 if (fence->context == rq->fence.context)
769                         continue;
770
771                 /* Squash repeated waits to the same timelines */
772                 if (fence->context != rq->i915->mm.unordered_timeline &&
773                     i915_timeline_sync_is_later(rq->timeline, fence))
774                         continue;
775
776                 if (dma_fence_is_i915(fence))
777                         ret = i915_request_await_request(rq, to_request(fence));
778                 else
779                         ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
780                                                             I915_FENCE_TIMEOUT,
781                                                             I915_FENCE_GFP);
782                 if (ret < 0)
783                         return ret;
784
785                 /* Record the latest fence used against each timeline */
786                 if (fence->context != rq->i915->mm.unordered_timeline)
787                         i915_timeline_sync_set(rq->timeline, fence);
788         } while (--nchild);
789
790         return 0;
791 }
792
793 /**
794  * i915_request_await_object - set this request to (async) wait upon a bo
795  * @to: request we are wishing to use
796  * @obj: object which may be in use on another ring.
797  * @write: whether the wait is on behalf of a writer
798  *
799  * This code is meant to abstract object synchronization with the GPU.
800  * Conceptually we serialise writes between engines inside the GPU.
801  * We only allow one engine to write into a buffer at any time, but
802  * multiple readers. To ensure each has a coherent view of memory, we must:
803  *
804  * - If there is an outstanding write request to the object, the new
805  *   request must wait for it to complete (either CPU or in hw, requests
806  *   on the same ring will be naturally ordered).
807  *
808  * - If we are a write request (pending_write_domain is set), the new
809  *   request must wait for outstanding read requests to complete.
810  *
811  * Returns 0 if successful, else propagates up the lower layer error.
812  */
813 int
814 i915_request_await_object(struct i915_request *to,
815                           struct drm_i915_gem_object *obj,
816                           bool write)
817 {
818         struct dma_fence *excl;
819         int ret = 0;
820
821         if (write) {
822                 struct dma_fence **shared;
823                 unsigned int count, i;
824
825                 ret = reservation_object_get_fences_rcu(obj->resv,
826                                                         &excl, &count, &shared);
827                 if (ret)
828                         return ret;
829
830                 for (i = 0; i < count; i++) {
831                         ret = i915_request_await_dma_fence(to, shared[i]);
832                         if (ret)
833                                 break;
834
835                         dma_fence_put(shared[i]);
836                 }
837
838                 for (; i < count; i++)
839                         dma_fence_put(shared[i]);
840                 kfree(shared);
841         } else {
842                 excl = reservation_object_get_excl_rcu(obj->resv);
843         }
844
845         if (excl) {
846                 if (ret == 0)
847                         ret = i915_request_await_dma_fence(to, excl);
848
849                 dma_fence_put(excl);
850         }
851
852         return ret;
853 }
854
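/*
 * i915_request_skip - cancel the payload of @rq after an error.
 *
 * Marks the fence with @error and overwrites the user portion of the ring
 * contents with zeroes (MI_NOOP), leaving only the final breadcrumb so
 * that the fence still signals in order.
 */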
855 void i915_request_skip(struct i915_request *rq, int error)
856 {
857         void *vaddr = rq->ring->vaddr;
858         u32 head;
859
860         GEM_BUG_ON(!IS_ERR_VALUE((long)error));
861         dma_fence_set_error(&rq->fence, error);
862
863         /*
864          * As this request likely depends on state from the lost
865          * context, clear out all the user operations leaving the
866          * breadcrumb at the end (so we get the fence notifications).
867          */
868         head = rq->infix;
869         if (rq->postfix < head) {
870                 memset(vaddr + head, 0, rq->ring->size - head);
871                 head = 0;
872         }
873         memset(vaddr + head, 0, rq->postfix - head);
874 }
875
876 /*
877  * NB: This function is not allowed to fail. Doing so would mean the the
878  * request is not being tracked for completion but the work itself is
879  * going to happen on the hardware. This would be a Bad Thing(tm).
880  */
881 void i915_request_add(struct i915_request *request)
882 {
883         struct intel_engine_cs *engine = request->engine;
884         struct i915_timeline *timeline = request->timeline;
885         struct intel_ring *ring = request->ring;
886         struct i915_request *prev;
887         u32 *cs;
888
889         GEM_TRACE("%s fence %llx:%lld\n",
890                   engine->name, request->fence.context, request->fence.seqno);
891
892         lockdep_assert_held(&request->i915->drm.struct_mutex);
893         trace_i915_request_add(request);
894
895         /*
896          * Make sure that no request gazumped us - if it was allocated after
897          * our i915_request_alloc() and called __i915_request_add() before
898          * us, the timeline will hold its seqno which is later than ours.
899          */
900         GEM_BUG_ON(timeline->seqno != request->fence.seqno);
901
902         /*
903          * To ensure that this call will not fail, space for its emissions
904          * should already have been reserved in the ring buffer. Let the ring
905          * know that it is time to use that space up.
906          */
907         GEM_BUG_ON(request->reserved_space > request->ring->space);
908         request->reserved_space = 0;
909
910         /*
911          * Record the position of the start of the breadcrumb so that
912          * should we detect the updated seqno part-way through the
913          * GPU processing the request, we never over-estimate the
914          * position of the ring's HEAD.
915          */
916         cs = intel_ring_begin(request, engine->emit_fini_breadcrumb_dw);
917         GEM_BUG_ON(IS_ERR(cs));
918         request->postfix = intel_ring_offset(request, cs);
919
920         /*
921          * Seal the request and mark it as pending execution. Note that
922          * we may inspect this state, without holding any locks, during
923          * hangcheck. Hence we apply the barrier to ensure that we do not
924          * see a more recent value in the hws than we are tracking.
925          */
926
927         prev = i915_active_request_raw(&timeline->last_request,
928                                        &request->i915->drm.struct_mutex);
929         if (prev && !i915_request_completed(prev)) {
930                 i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
931                                              &request->submitq);
932                 if (engine->schedule)
933                         __i915_sched_node_add_dependency(&request->sched,
934                                                          &prev->sched,
935                                                          &request->dep,
936                                                          0);
937         }
938
939         spin_lock_irq(&timeline->lock);
940         list_add_tail(&request->link, &timeline->requests);
941         spin_unlock_irq(&timeline->lock);
942
943         GEM_BUG_ON(timeline->seqno != request->fence.seqno);
944         __i915_active_request_set(&timeline->last_request, request);
945
946         list_add_tail(&request->ring_link, &ring->request_list);
947         if (list_is_first(&request->ring_link, &ring->request_list)) {
948                 GEM_TRACE("marking %s as active\n", ring->timeline->name);
949                 list_add(&ring->active_link, &request->i915->gt.active_rings);
950         }
951         request->emitted_jiffies = jiffies;
952
953         /*
954          * Let the backend know a new request has arrived that may need
955          * to adjust the existing execution schedule due to a high priority
956          * request - i.e. we may want to preempt the current request in order
957          * to run a high priority dependency chain *before* we can execute this
958          * request.
959          *
960          * This is called before the request is ready to run so that we can
961          * decide whether to preempt the entire chain, so that it is ready to
962          * run at the earliest opportunity.
963          */
964         local_bh_disable();
965         rcu_read_lock(); /* RCU serialisation for set-wedged protection */
966         if (engine->schedule) {
967                 struct i915_sched_attr attr = request->gem_context->sched;
968
969                 /*
970                  * Boost priorities to new clients (new request flows).
971                  *
972                  * Allow interactive/synchronous clients to jump ahead of
973                  * the bulk clients. (FQ_CODEL)
974                  */
975                 if (list_empty(&request->sched.signalers_list))
976                         attr.priority |= I915_PRIORITY_NEWCLIENT;
977
978                 engine->schedule(request, &attr);
979         }
980         rcu_read_unlock();
981         i915_sw_fence_commit(&request->submit);
982         local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
983
984         /*
985          * In typical scenarios, we do not expect the previous request on
986          * the timeline to be still tracked by timeline->last_request if it
987          * has been completed. If the completed request is still here, that
988          * implies that request retirement is a long way behind submission,
989          * suggesting that we haven't been retiring frequently enough from
990          * the combination of retire-before-alloc, waiters and the background
991          * retirement worker. So if the last request on this timeline was
992          * already completed, do a catch up pass, flushing the retirement queue
993          * up to this client. Since we have now moved the heaviest operations
994          * during retirement onto secondary workers, such as freeing objects
995          * or contexts, retiring a bunch of requests is mostly list management
996          * (and cache misses), and so we should not be overly penalizing this
997          * client by performing excess work, though we may still be performing
998          * work on behalf of others -- but instead we should benefit from
999          * improved resource management. (Well, that's the theory at least.)
1000          */
1001         if (prev && i915_request_completed(prev))
1002                 i915_request_retire_upto(prev);
1003 }
1004
1005 static unsigned long local_clock_us(unsigned int *cpu)
1006 {
1007         unsigned long t;
1008
1009         /*
1010          * Cheaply and approximately convert from nanoseconds to microseconds.
1011          * The result and subsequent calculations are also defined in the same
1012          * approximate microseconds units. The principal source of timing
1013          * error here is from the simple truncation.
1014          *
1015          * Note that local_clock() is only defined wrt the current CPU;
1016          * the comparisons are no longer valid if we switch CPUs. Instead of
1017          * blocking preemption for the entire busywait, we can detect the CPU
1018          * switch and use that as indicator of system load and a reason to
1019          * stop busywaiting, see busywait_stop().
1020          */
1021         *cpu = get_cpu();
1022         t = local_clock() >> 10;
1023         put_cpu();
1024
1025         return t;
1026 }
1027
1028 static bool busywait_stop(unsigned long timeout, unsigned int cpu)
1029 {
1030         unsigned int this_cpu;
1031
1032         if (time_after(local_clock_us(&this_cpu), timeout))
1033                 return true;
1034
1035         return this_cpu != cpu;
1036 }
1037
1038 static bool __i915_spin_request(const struct i915_request * const rq,
1039                                 int state, unsigned long timeout_us)
1040 {
1041         unsigned int cpu;
1042
1043         /*
1044          * Only wait for the request if we know it is likely to complete.
1045          *
1046          * We don't track the timestamps around requests, nor the average
1047          * request length, so we do not have a good indicator that this
1048          * request will complete within the timeout. What we do know is the
1049          * order in which requests are executed by the context and so we can
1050          * tell if the request has been started. If the request is not even
1051          * running yet, it is a fair assumption that it will not complete
1052          * within our relatively short timeout.
1053          */
1054         if (!i915_request_is_running(rq))
1055                 return false;
1056
1057         /*
1058          * When waiting for high frequency requests, e.g. during synchronous
1059          * rendering split between the CPU and GPU, the finite amount of time
1060          * required to set up the irq and wait upon it limits the response
1061          * rate. By busywaiting on the request completion for a short while we
1062          * can service the high frequency waits as quickly as possible. However,
1063          * if it is a slow request, we want to sleep as quickly as possible.
1064          * The tradeoff between waiting and sleeping is roughly the time it
1065          * takes to sleep on a request, on the order of a microsecond.
1066          */
1067
1068         timeout_us += local_clock_us(&cpu);
1069         do {
1070                 if (i915_request_completed(rq))
1071                         return true;
1072
1073                 if (signal_pending_state(state, current))
1074                         break;
1075
1076                 if (busywait_stop(timeout_us, cpu))
1077                         break;
1078
1079                 cpu_relax();
1080         } while (!need_resched());
1081
1082         return false;
1083 }
1084
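/*
 * Small helper used by i915_request_wait(): a dma-fence callback that
 * wakes the waiting task when the request's fence is signaled.
 */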
1085 struct request_wait {
1086         struct dma_fence_cb cb;
1087         struct task_struct *tsk;
1088 };
1089
1090 static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
1091 {
1092         struct request_wait *wait = container_of(cb, typeof(*wait), cb);
1093
1094         wake_up_process(wait->tsk);
1095 }
1096
1097 /**
1098  * i915_request_wait - wait until execution of request has finished
1099  * @rq: the request to wait upon
1100  * @flags: how to wait
1101  * @timeout: how long to wait in jiffies
1102  *
1103  * i915_request_wait() waits for the request to be completed, for a
1104  * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
1105  * unbounded wait).
1106  *
1107  * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
1108  * in via the flags; conversely, if the struct_mutex is not held, the caller
1109  * must not specify that the wait is locked.
1110  *
1111  * Returns the remaining time (in jiffies, which may be zero) if the request
1112  * completed, or -ETIME if the request is unfinished after the timeout expires.
1113  * May return -ERESTARTSYS if called with I915_WAIT_INTERRUPTIBLE and a signal is
1114  * pending before the request completes.
1115  */
1116 long i915_request_wait(struct i915_request *rq,
1117                        unsigned int flags,
1118                        long timeout)
1119 {
1120         const int state = flags & I915_WAIT_INTERRUPTIBLE ?
1121                 TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1122         struct request_wait wait;
1123
1124         might_sleep();
1125         GEM_BUG_ON(timeout < 0);
1126
1127         if (i915_request_completed(rq))
1128                 return timeout;
1129
1130         if (!timeout)
1131                 return -ETIME;
1132
1133         trace_i915_request_wait_begin(rq, flags);
1134
1135         /* Optimistic short spin before touching IRQs */
1136         if (__i915_spin_request(rq, state, 5))
1137                 goto out;
1138
1139         if (flags & I915_WAIT_PRIORITY)
1140                 i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
1141
1142         wait.tsk = current;
1143         if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
1144                 goto out;
1145
1146         for (;;) {
1147                 set_current_state(state);
1148
1149                 if (i915_request_completed(rq))
1150                         break;
1151
1152                 if (signal_pending_state(state, current)) {
1153                         timeout = -ERESTARTSYS;
1154                         break;
1155                 }
1156
1157                 if (!timeout) {
1158                         timeout = -ETIME;
1159                         break;
1160                 }
1161
1162                 timeout = io_schedule_timeout(timeout);
1163         }
1164         __set_current_state(TASK_RUNNING);
1165
1166         dma_fence_remove_callback(&rq->fence, &wait.cb);
1167
1168 out:
1169         trace_i915_request_wait_end(rq);
1170         return timeout;
1171 }
1172
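/*
 * Retire completed requests on every ring that still has requests
 * outstanding. Called under struct_mutex.
 */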
1173 void i915_retire_requests(struct drm_i915_private *i915)
1174 {
1175         struct intel_ring *ring, *tmp;
1176
1177         lockdep_assert_held(&i915->drm.struct_mutex);
1178
1179         if (!i915->gt.active_requests)
1180                 return;
1181
1182         list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
1183                 ring_retire_requests(ring);
1184 }
1185
1186 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1187 #include "selftests/mock_request.c"
1188 #include "selftests/i915_request.c"
1189 #endif