drivers/gpu/drm/panfrost/panfrost_job.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

#define JOB_TIMEOUT_MS 500

#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))

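/*
 * Per-slot scheduler state: one drm_gpu_scheduler instance and one
 * dma_fence timeline (context + seqno) per hardware job slot.
 */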
struct panfrost_queue_state {
        struct drm_gpu_scheduler sched;
        u64 fence_context;
        u64 emit_seqno;
};

struct panfrost_job_slot {
        struct panfrost_queue_state queue[NUM_JOB_SLOTS];
        spinlock_t job_lock;
        int irq;
};

static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
        return container_of(sched_job, struct panfrost_job, base);
}

struct panfrost_fence {
        struct dma_fence base;
        struct drm_device *dev;
        /* panfrost seqno for signaled() test */
        u64 seqno;
        int queue;
};

static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
        return (struct panfrost_fence *)fence;
}

static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
        return "panfrost";
}

static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
        struct panfrost_fence *f = to_panfrost_fence(fence);

        switch (f->queue) {
        case 0:
                return "panfrost-js-0";
        case 1:
                return "panfrost-js-1";
        case 2:
                return "panfrost-js-2";
        default:
                return NULL;
        }
}

static const struct dma_fence_ops panfrost_fence_ops = {
        .get_driver_name = panfrost_fence_get_driver_name,
        .get_timeline_name = panfrost_fence_get_timeline_name,
};

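/*
 * Allocate a hardware "done" fence on the timeline of the given job slot.
 * The fence is signaled from the job interrupt path once the job chain has
 * completed, or carries an error if the job was terminated or faulted.
 */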
static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
        struct panfrost_fence *fence;
        struct panfrost_job_slot *js = pfdev->js;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return ERR_PTR(-ENOMEM);

        fence->dev = pfdev->ddev;
        fence->queue = js_num;
        fence->seqno = ++js->queue[js_num].emit_seqno;
        dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
                       js->queue[js_num].fence_context, fence->seqno);

        return &fence->base;
}

int panfrost_job_get_slot(struct panfrost_job *job)
{
        /* JS0: fragment jobs
         * JS1: vertex/tiler jobs
         * JS2: compute jobs
         */
        if (job->requirements & PANFROST_JD_REQ_FS)
                return 0;

/* Not exposed to userspace yet */
#if 0
        if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
                if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
                    (job->pfdev->features.nr_core_groups == 2))
                        return 2;
                if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
                        return 2;
        }
#endif
        return 1;
}

static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
                                        u32 requirements,
                                        int js)
{
        u64 affinity;

        /*
         * Use all cores for now.
         * Eventually we may need to support tiler-only jobs and h/w with
         * multiple (2) coherent core groups.
         */
        affinity = pfdev->features.shader_present;

        job_write(pfdev, JS_AFFINITY_NEXT_LO(js), lower_32_bits(affinity));
        job_write(pfdev, JS_AFFINITY_NEXT_HI(js), upper_32_bits(affinity));
}

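/*
 * On GPUs with jobchain disambiguation, two job chains can be queued on the
 * same slot, and soft/hard-stop commands must identify which of the two to
 * target. Alternate JS_CONFIG_JOB_CHAIN_FLAG based on the parity of the
 * done-fence seqno so two consecutive jobs on a slot never carry the same
 * flag (panfrost_enqueue_job() WARNs if they do).
 */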
static u32
panfrost_get_job_chain_flag(const struct panfrost_job *job)
{
        struct panfrost_fence *f = to_panfrost_fence(job->done_fence);

        if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
                return 0;

        return (f->seqno & 1) ? JS_CONFIG_JOB_CHAIN_FLAG : 0;
}

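/*
 * pfdev->jobs[slot] is a two-entry FIFO mirroring the hardware CURRENT/NEXT
 * registers of a job slot: entry 0 is the running job, entry 1 the one
 * queued in _NEXT. Dequeuing pops the head and shifts the queued job up.
 */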
static struct panfrost_job *
panfrost_dequeue_job(struct panfrost_device *pfdev, int slot)
{
        struct panfrost_job *job = pfdev->jobs[slot][0];

        WARN_ON(!job);
        pfdev->jobs[slot][0] = pfdev->jobs[slot][1];
        pfdev->jobs[slot][1] = NULL;

        return job;
}

static unsigned int
panfrost_enqueue_job(struct panfrost_device *pfdev, int slot,
                     struct panfrost_job *job)
{
        if (WARN_ON(!job))
                return 0;

        if (!pfdev->jobs[slot][0]) {
                pfdev->jobs[slot][0] = job;
                return 0;
        }

        WARN_ON(pfdev->jobs[slot][1]);
        pfdev->jobs[slot][1] = job;
        WARN_ON(panfrost_get_job_chain_flag(job) ==
                panfrost_get_job_chain_flag(pfdev->jobs[slot][0]));
        return 1;
}

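/*
 * Program a job slot and kick the job chain: take devfreq/PM references,
 * grab an address space for the client's MMU context, write the job head,
 * affinity and config registers, then issue JS_COMMAND_START unless a GPU
 * reset is pending, in which case the job stays queued in pfdev->jobs[]
 * and the scheduler resubmits it once the reset is done.
 */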
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
        struct panfrost_device *pfdev = job->pfdev;
        unsigned int subslot;
        u32 cfg;
        u64 jc_head = job->jc;
        int ret;

        panfrost_devfreq_record_busy(&pfdev->pfdevfreq);

        ret = pm_runtime_get_sync(pfdev->dev);
        if (ret < 0)
                return;

        if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
                return;

        cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);

        job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
        job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));

        panfrost_job_write_affinity(pfdev, job->requirements, js);

        /* Start MMU, medium priority, cache clean/flush on end, clean/flush
         * on start.
         */
        cfg |= JS_CONFIG_THREAD_PRI(8) |
                JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
                JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE |
                panfrost_get_job_chain_flag(job);

        if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
                cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

        if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
                cfg |= JS_CONFIG_START_MMU;

        job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

        if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
                job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

        /* GO ! */

        spin_lock(&pfdev->js->job_lock);
        subslot = panfrost_enqueue_job(pfdev, js, job);
        /* Don't queue the job if a reset is in progress */
        if (!atomic_read(&pfdev->reset.pending)) {
                job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
                dev_dbg(pfdev->dev,
                        "JS: Submitting atom %p to js[%d][%d] with head=0x%llx AS %d",
                        job, js, subslot, jc_head, cfg & 0xf);
        }
        spin_unlock(&pfdev->js->job_lock);
}

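/*
 * Reserve fence slots and register the implicit (dma_resv) dependencies of
 * each BO with the scheduler job. Panfrost's current uapi treats every BO
 * as written, so dependencies are always added in write mode.
 */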
static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
                                          int bo_count,
                                          struct drm_sched_job *job)
{
        int i, ret;

        for (i = 0; i < bo_count; i++) {
                ret = dma_resv_reserve_fences(bos[i]->resv, 1);
                if (ret)
                        return ret;

                /* panfrost always uses write mode in its current uapi */
                ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
                                                              true);
                if (ret)
                        return ret;
        }

        return 0;
}

static void panfrost_attach_object_fences(struct drm_gem_object **bos,
                                          int bo_count,
                                          struct dma_fence *fence)
{
        int i;

        for (i = 0; i < bo_count; i++)
                dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE);
}

int panfrost_job_push(struct panfrost_job *job)
{
        struct panfrost_device *pfdev = job->pfdev;
        struct ww_acquire_ctx acquire_ctx;
        int ret = 0;

        ret = drm_gem_lock_reservations(job->bos, job->bo_count,
                                        &acquire_ctx);
        if (ret)
                return ret;

        mutex_lock(&pfdev->sched_lock);
        drm_sched_job_arm(&job->base);

        job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

        ret = panfrost_acquire_object_fences(job->bos, job->bo_count,
                                             &job->base);
        if (ret) {
                mutex_unlock(&pfdev->sched_lock);
                goto unlock;
        }

        kref_get(&job->refcount); /* put by scheduler job completion */

        drm_sched_entity_push_job(&job->base);

        mutex_unlock(&pfdev->sched_lock);

        panfrost_attach_object_fences(job->bos, job->bo_count,
                                      job->render_done_fence);

unlock:
        drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

        return ret;
}

static void panfrost_job_cleanup(struct kref *ref)
{
        struct panfrost_job *job = container_of(ref, struct panfrost_job,
                                                refcount);
        unsigned int i;

        dma_fence_put(job->done_fence);
        dma_fence_put(job->render_done_fence);

        if (job->mappings) {
                for (i = 0; i < job->bo_count; i++) {
                        if (!job->mappings[i])
                                break;

                        atomic_dec(&job->mappings[i]->obj->gpu_usecount);
                        panfrost_gem_mapping_put(job->mappings[i]);
                }
                kvfree(job->mappings);
        }

        if (job->bos) {
                for (i = 0; i < job->bo_count; i++)
                        drm_gem_object_put(job->bos[i]);

                kvfree(job->bos);
        }

        kfree(job);
}

void panfrost_job_put(struct panfrost_job *job)
{
        kref_put(&job->refcount, panfrost_job_cleanup);
}

static void panfrost_job_free(struct drm_sched_job *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);

        drm_sched_job_cleanup(sched_job);

        panfrost_job_put(job);
}

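/*
 * drm_sched run_job() hook, called once all job dependencies have
 * signaled: create the hardware done-fence, submit the job chain to its
 * slot and return the fence the scheduler will wait on for completion.
 */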
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);
        struct panfrost_device *pfdev = job->pfdev;
        int slot = panfrost_job_get_slot(job);
        struct dma_fence *fence = NULL;

        if (unlikely(job->base.s_fence->finished.error))
                return NULL;

        /* Nothing to execute: can happen if the job has finished while
         * we were resetting the GPU.
         */
        if (!job->jc)
                return NULL;

        fence = panfrost_fence_create(pfdev, slot);
        if (IS_ERR(fence))
                return fence;

        if (job->done_fence)
                dma_fence_put(job->done_fence);
        job->done_fence = dma_fence_get(fence);

        panfrost_job_hw_submit(job, slot);

        return fence;
}

void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
        int j;
        u32 irq_mask = 0;

        for (j = 0; j < NUM_JOB_SLOTS; j++)
                irq_mask |= MK_JS_MASK(j);

        job_write(pfdev, JOB_INT_CLEAR, irq_mask);
        job_write(pfdev, JOB_INT_MASK, irq_mask);
}

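/*
 * Handle a job that completed with a non-DONE status: decode the
 * exception, keep soft-stopped jobs resumable by saving JS_TAIL into
 * ->jc, flag terminated/faulty jobs with a fence error, release the
 * AS/devfreq/PM references, and schedule a GPU reset when the exception
 * requires one. Called with job_lock held.
 */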
static void panfrost_job_handle_err(struct panfrost_device *pfdev,
                                    struct panfrost_job *job,
                                    unsigned int js)
{
        u32 js_status = job_read(pfdev, JS_STATUS(js));
        const char *exception_name = panfrost_exception_name(js_status);
        bool signal_fence = true;

        if (!panfrost_exception_is_fault(js_status)) {
                dev_dbg(pfdev->dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x",
                        js, exception_name,
                        job_read(pfdev, JS_HEAD_LO(js)),
                        job_read(pfdev, JS_TAIL_LO(js)));
        } else {
                dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
                        js, exception_name,
                        job_read(pfdev, JS_HEAD_LO(js)),
                        job_read(pfdev, JS_TAIL_LO(js)));
        }

        if (js_status == DRM_PANFROST_EXCEPTION_STOPPED) {
                /* Update the job head so we can resume */
                job->jc = job_read(pfdev, JS_TAIL_LO(js)) |
                          ((u64)job_read(pfdev, JS_TAIL_HI(js)) << 32);

                /* The job will be resumed, don't signal the fence */
                signal_fence = false;
        } else if (js_status == DRM_PANFROST_EXCEPTION_TERMINATED) {
                /* Job has been hard-stopped, flag it as canceled */
                dma_fence_set_error(job->done_fence, -ECANCELED);
                job->jc = 0;
        } else if (panfrost_exception_is_fault(js_status)) {
                /* We might want to provide a finer-grained error code based
                 * on the exception type, but unconditionally setting it to
                 * EINVAL is good enough for now.
                 */
                dma_fence_set_error(job->done_fence, -EINVAL);
                job->jc = 0;
        }

        panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
        panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

        if (signal_fence)
                dma_fence_signal_locked(job->done_fence);

        pm_runtime_put_autosuspend(pfdev->dev);

        if (panfrost_exception_needs_reset(pfdev, js_status)) {
                atomic_set(&pfdev->reset.pending, 1);
                drm_sched_fault(&pfdev->js->queue[js].sched);
        }
}

static void panfrost_job_handle_done(struct panfrost_device *pfdev,
                                     struct panfrost_job *job)
{
        /* Set ->jc to 0 to avoid re-submitting an already finished job (can
         * happen when we receive the DONE interrupt while doing a GPU reset).
         */
        job->jc = 0;
        panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
        panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

        dma_fence_signal_locked(job->done_fence);
        pm_runtime_put_autosuspend(pfdev->dev);
}

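/*
 * Main job interrupt processing, called with job_lock held: collect the
 * done/failed jobs reported by JOB_INT_RAWSTAT (re-reading until it is
 * stable, to avoid racing with a second job completing or failing while
 * the status is being cleared), then signal fences and handle errors, and
 * finally restart any job that was cancelled in _NEXT because of a
 * failure on the same slot.
 */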
static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
{
        struct panfrost_job *done[NUM_JOB_SLOTS][2] = {};
        struct panfrost_job *failed[NUM_JOB_SLOTS] = {};
        u32 js_state = 0, js_events = 0;
        unsigned int i, j;

        /* First we collect all failed/done jobs. */
        while (status) {
                u32 js_state_mask = 0;

                for (j = 0; j < NUM_JOB_SLOTS; j++) {
                        if (status & MK_JS_MASK(j))
                                js_state_mask |= MK_JS_MASK(j);

                        if (status & JOB_INT_MASK_DONE(j)) {
                                if (done[j][0])
                                        done[j][1] = panfrost_dequeue_job(pfdev, j);
                                else
                                        done[j][0] = panfrost_dequeue_job(pfdev, j);
                        }

                        if (status & JOB_INT_MASK_ERR(j)) {
                                /* Cancel the next submission. Will be submitted
                                 * after we're done handling this failure if
                                 * there's no reset pending.
                                 */
                                job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);
                                failed[j] = panfrost_dequeue_job(pfdev, j);
                        }
                }

                /* JS_STATE is sampled when JOB_INT_CLEAR is written.
                 * For each BIT(slot) or BIT(slot + 16) bit written to
                 * JOB_INT_CLEAR, the corresponding bits in JS_STATE
                 * (BIT(slot) and BIT(slot + 16)) are updated, but this
                 * is racy. If we only have one job done at the time we
                 * read JOB_INT_RAWSTAT but the second job fails before we
                 * clear the status, we end up with a status containing
                 * only the DONE bit and consider both jobs as DONE since
                 * JS_STATE reports both NEXT and CURRENT as inactive.
                 * To prevent that, let's repeat the clear+read steps
                 * until status is 0.
                 */
                job_write(pfdev, JOB_INT_CLEAR, status);
                js_state &= ~js_state_mask;
                js_state |= job_read(pfdev, JOB_INT_JS_STATE) & js_state_mask;
                js_events |= status;
                status = job_read(pfdev, JOB_INT_RAWSTAT);
        }

        /* Then we handle the dequeued jobs. */
        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                if (!(js_events & MK_JS_MASK(j)))
                        continue;

                if (failed[j]) {
                        panfrost_job_handle_err(pfdev, failed[j], j);
                } else if (pfdev->jobs[j][0] && !(js_state & MK_JS_MASK(j))) {
                        /* When the current job doesn't fail, the JM dequeues
                         * the next job without waiting for an ACK, which means
                         * we can have 2 jobs dequeued and only catch the
                         * interrupt when the second one is done. If both slots
                         * are inactive, but one job remains in pfdev->jobs[j],
                         * consider it done. Of course that doesn't apply if a
                         * failure happened since we cancelled execution of the
                         * job in _NEXT (see above).
                         */
                        if (WARN_ON(!done[j][0]))
                                done[j][0] = panfrost_dequeue_job(pfdev, j);
                        else
                                done[j][1] = panfrost_dequeue_job(pfdev, j);
                }

                for (i = 0; i < ARRAY_SIZE(done[0]) && done[j][i]; i++)
                        panfrost_job_handle_done(pfdev, done[j][i]);
        }

        /* And finally we requeue jobs that were waiting in the second slot
         * and have been stopped if we detected a failure on the first slot.
         */
        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                if (!(js_events & MK_JS_MASK(j)))
                        continue;

                if (!failed[j] || !pfdev->jobs[j][0])
                        continue;

                if (pfdev->jobs[j][0]->jc == 0) {
                        /* The job was cancelled, signal the fence now */
                        struct panfrost_job *canceled = panfrost_dequeue_job(pfdev, j);

                        dma_fence_set_error(canceled->done_fence, -ECANCELED);
                        panfrost_job_handle_done(pfdev, canceled);
                } else if (!atomic_read(&pfdev->reset.pending)) {
                        /* Requeue the job we removed if no reset is pending */
                        job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_START);
                }
        }
}

static void panfrost_job_handle_irqs(struct panfrost_device *pfdev)
{
        u32 status = job_read(pfdev, JOB_INT_RAWSTAT);

        while (status) {
                pm_runtime_mark_last_busy(pfdev->dev);

                spin_lock(&pfdev->js->job_lock);
                panfrost_job_handle_irq(pfdev, status);
                spin_unlock(&pfdev->js->job_lock);
                status = job_read(pfdev, JOB_INT_RAWSTAT);
        }
}

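/*
 * Helper for the soft-stop poll loop in panfrost_reset(): return the
 * JS_STATE bits of the slots still being waited on, dropping from the mask
 * (for this and all subsequent polls) any slot with a pending interrupt,
 * since that slot will be handled by panfrost_job_handle_irqs() before the
 * actual reset.
 */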
static u32 panfrost_active_slots(struct panfrost_device *pfdev,
                                 u32 *js_state_mask, u32 js_state)
{
        u32 rawstat;

        if (!(js_state & *js_state_mask))
                return 0;

        rawstat = job_read(pfdev, JOB_INT_RAWSTAT);
        if (rawstat) {
                unsigned int i;

                for (i = 0; i < NUM_JOB_SLOTS; i++) {
                        if (rawstat & MK_JS_MASK(i))
                                *js_state_mask &= ~MK_JS_MASK(i);
                }
        }

        return js_state & *js_state_mask;
}

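/*
 * Synchronous GPU reset path, entered from the timeout handler or the
 * reset workqueue: stop the schedulers, mask and drain job interrupts,
 * soft-stop the slots, balance the PM/devfreq references of stuck jobs,
 * reset the GPU, then resubmit pending jobs and restart the schedulers.
 */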
static void
panfrost_reset(struct panfrost_device *pfdev,
               struct drm_sched_job *bad)
{
        u32 js_state, js_state_mask = 0xffffffff;
        unsigned int i, j;
        bool cookie;
        int ret;

        if (!atomic_read(&pfdev->reset.pending))
                return;

        /* Stop the schedulers.
         *
         * FIXME: We temporarily get out of the dma_fence_signalling section
         * because the cleanup path generates lockdep splats when taking locks
         * to release job resources. We should rework the code to follow this
         * pattern:
         *
         *      try_lock
         *      if (locked)
         *              release
         *      else
         *              schedule_work_to_release_later
         */
        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_stop(&pfdev->js->queue[i].sched, bad);

        cookie = dma_fence_begin_signalling();

        if (bad)
                drm_sched_increase_karma(bad);

        /* Mask job interrupts and synchronize to make sure we won't be
         * interrupted during our reset.
         */
        job_write(pfdev, JOB_INT_MASK, 0);
        synchronize_irq(pfdev->js->irq);

        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                /* Cancel the next job and soft-stop the running job. */
                job_write(pfdev, JS_COMMAND_NEXT(i), JS_COMMAND_NOP);
                job_write(pfdev, JS_COMMAND(i), JS_COMMAND_SOFT_STOP);
        }

        /* Wait at most 10ms for soft-stops to complete */
        ret = readl_poll_timeout(pfdev->iomem + JOB_INT_JS_STATE, js_state,
                                 !panfrost_active_slots(pfdev, &js_state_mask, js_state),
                                 10, 10000);

        if (ret)
                dev_err(pfdev->dev, "Soft-stop failed\n");

        /* Handle the remaining interrupts before we reset. */
        panfrost_job_handle_irqs(pfdev);

        /* Remaining interrupts have been handled, but we might still have
         * stuck jobs. Let's make sure the PM counters stay balanced by
         * manually calling pm_runtime_put_noidle() and
         * panfrost_devfreq_record_idle() for each stuck job.
         */
        spin_lock(&pfdev->js->job_lock);
        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                for (j = 0; j < ARRAY_SIZE(pfdev->jobs[0]) && pfdev->jobs[i][j]; j++) {
                        pm_runtime_put_noidle(pfdev->dev);
                        panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
                }
        }
        memset(pfdev->jobs, 0, sizeof(pfdev->jobs));
        spin_unlock(&pfdev->js->job_lock);

        /* Proceed with reset now. */
        panfrost_device_reset(pfdev);

        /* panfrost_device_reset() unmasks job interrupts, but we want to
         * keep them masked a bit longer.
         */
        job_write(pfdev, JOB_INT_MASK, 0);

        /* GPU has been reset, we can clear the reset pending bit. */
        atomic_set(&pfdev->reset.pending, 0);

        /* Now resubmit jobs that were previously queued but didn't have a
         * chance to finish.
         * FIXME: We temporarily get out of the DMA fence signalling section
         * while resubmitting jobs because the job submission logic will
         * allocate memory with the GFP_KERNEL flag which can trigger memory
         * reclaim and exposes a lock ordering issue.
         */
        dma_fence_end_signalling(cookie);
        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
        cookie = dma_fence_begin_signalling();

        /* Restart the schedulers */
        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_start(&pfdev->js->queue[i].sched, true);

        /* Re-enable job interrupts now that everything has been restarted. */
        job_write(pfdev, JOB_INT_MASK,
                  GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
                  GENMASK(NUM_JOB_SLOTS - 1, 0));

        dma_fence_end_signalling(cookie);
}

static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
                                                     *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);
        struct panfrost_device *pfdev = job->pfdev;
        int js = panfrost_job_get_slot(job);

        /*
         * If the GPU managed to complete this job's fence, the timeout is
         * spurious. Bail out.
         */
        if (dma_fence_is_signaled(job->done_fence))
                return DRM_GPU_SCHED_STAT_NOMINAL;

        dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
                js,
                job_read(pfdev, JS_CONFIG(js)),
                job_read(pfdev, JS_STATUS(js)),
                job_read(pfdev, JS_HEAD_LO(js)),
                job_read(pfdev, JS_TAIL_LO(js)),
                sched_job);

        atomic_set(&pfdev->reset.pending, 1);
        panfrost_reset(pfdev, sched_job);

        return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void panfrost_reset_work(struct work_struct *work)
{
        struct panfrost_device *pfdev;

        pfdev = container_of(work, struct panfrost_device, reset.work);
        panfrost_reset(pfdev, NULL);
}

static const struct drm_sched_backend_ops panfrost_sched_ops = {
        .run_job = panfrost_job_run,
        .timedout_job = panfrost_job_timedout,
        .free_job = panfrost_job_free
};

static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data)
{
        struct panfrost_device *pfdev = data;

        panfrost_job_handle_irqs(pfdev);
        job_write(pfdev, JOB_INT_MASK,
                  GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
                  GENMASK(NUM_JOB_SLOTS - 1, 0));
        return IRQ_HANDLED;
}

static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
        struct panfrost_device *pfdev = data;
        u32 status = job_read(pfdev, JOB_INT_STAT);

        if (!status)
                return IRQ_NONE;

        job_write(pfdev, JOB_INT_MASK, 0);
        return IRQ_WAKE_THREAD;
}

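/*
 * One-time job subsystem setup: request the "job" IRQ, create the ordered
 * reset workqueue, and initialize one drm_gpu_scheduler per job slot with
 * a hardware queue depth of 2 (or 1 without jobchain disambiguation).
 */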
int panfrost_job_init(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js;
        unsigned int nentries = 2;
        int ret, j;

        /* All GPUs have two entries per queue, but without jobchain
         * disambiguation stopping the right job in the close path is tricky,
         * so let's just advertise one entry in that case.
         */
        if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
                nentries = 1;

        pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
        if (!js)
                return -ENOMEM;

        INIT_WORK(&pfdev->reset.work, panfrost_reset_work);
        spin_lock_init(&js->job_lock);

        js->irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
        if (js->irq <= 0)
                return -ENODEV;

        ret = devm_request_threaded_irq(pfdev->dev, js->irq,
                                        panfrost_job_irq_handler,
                                        panfrost_job_irq_handler_thread,
                                        IRQF_SHARED, KBUILD_MODNAME "-job",
                                        pfdev);
        if (ret) {
                dev_err(pfdev->dev, "failed to request job irq");
                return ret;
        }

        pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0);
        if (!pfdev->reset.wq)
                return -ENOMEM;

        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                js->queue[j].fence_context = dma_fence_context_alloc(1);

                ret = drm_sched_init(&js->queue[j].sched,
                                     &panfrost_sched_ops,
                                     nentries, 0,
                                     msecs_to_jiffies(JOB_TIMEOUT_MS),
                                     pfdev->reset.wq,
                                     NULL, "pan_js", pfdev->dev);
                if (ret) {
                        dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
                        goto err_sched;
                }
        }

        panfrost_job_enable_interrupts(pfdev);

        return 0;

err_sched:
        for (j--; j >= 0; j--)
                drm_sched_fini(&js->queue[j].sched);

        destroy_workqueue(pfdev->reset.wq);
        return ret;
}

void panfrost_job_fini(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js = pfdev->js;
        int j;

        job_write(pfdev, JOB_INT_MASK, 0);

        for (j = 0; j < NUM_JOB_SLOTS; j++)
                drm_sched_fini(&js->queue[j].sched);

        cancel_work_sync(&pfdev->reset.work);
        destroy_workqueue(pfdev->reset.wq);
}

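/*
 * Per-file-descriptor setup: give each client one scheduler entity per job
 * slot, so drm_sched arbitrates its submissions against other clients'.
 */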
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
        struct panfrost_device *pfdev = panfrost_priv->pfdev;
        struct panfrost_job_slot *js = pfdev->js;
        struct drm_gpu_scheduler *sched;
        int ret, i;

        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                sched = &js->queue[i].sched;
                ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
                                            DRM_SCHED_PRIORITY_NORMAL, &sched,
                                            1, NULL);
                if (WARN_ON(ret))
                        return ret;
        }
        return 0;
}

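/*
 * Per-file-descriptor teardown: destroy the scheduler entities, then
 * hard-stop any of this client's jobs still on the hardware, cancelling
 * the _NEXT job first so it cannot be restarted behind our back.
 */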
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
        struct panfrost_device *pfdev = panfrost_priv->pfdev;
        int i;

        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);

        /* Kill in-flight jobs */
        spin_lock(&pfdev->js->job_lock);
        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i];
                int j;

                for (j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) {
                        struct panfrost_job *job = pfdev->jobs[i][j];
                        u32 cmd;

                        if (!job || job->base.entity != entity)
                                continue;

                        if (j == 1) {
                                /* Try to cancel the job before it starts */
                                job_write(pfdev, JS_COMMAND_NEXT(i), JS_COMMAND_NOP);
                                /* Reset the job head so it doesn't get restarted if
                                 * the job in the first slot failed.
                                 */
                                job->jc = 0;
                        }

                        if (panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
                                cmd = panfrost_get_job_chain_flag(job) ?
                                      JS_COMMAND_HARD_STOP_1 :
                                      JS_COMMAND_HARD_STOP_0;
                        } else {
                                cmd = JS_COMMAND_HARD_STOP;
                        }

                        job_write(pfdev, JS_COMMAND(i), cmd);
                }
        }
        spin_unlock(&pfdev->js->job_lock);
}

int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js = pfdev->js;
        int i;

        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                /* If there are any jobs in the HW queue, we're not idle */
                if (atomic_read(&js->queue[i].sched.hw_rq_count))
                        return false;
        }

        return true;
}