drivers/gpu/drm/panfrost/panfrost_job.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))
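
/*
 * Note: job_write()/job_read() are thin MMIO accessors over the Job
 * Manager register file; "reg" is a byte offset into the GPU's iomem
 * mapping, so a slot submit below boils down to a single writel() to
 * that slot's JS_COMMAND_NEXT register.
 */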

struct panfrost_queue_state {
        struct drm_gpu_scheduler sched;

        u64 fence_context;
        u64 emit_seqno;
};

struct panfrost_job_slot {
        struct panfrost_queue_state queue[NUM_JOB_SLOTS];
        spinlock_t job_lock;
};

static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
        return container_of(sched_job, struct panfrost_job, base);
}

struct panfrost_fence {
        struct dma_fence base;
        struct drm_device *dev;
        /* panfrost seqno for signaled() test */
        u64 seqno;
        int queue;
};

static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
        return (struct panfrost_fence *)fence;
}

static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
        return "panfrost";
}

static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
        struct panfrost_fence *f = to_panfrost_fence(fence);

        switch (f->queue) {
        case 0:
                return "panfrost-js-0";
        case 1:
                return "panfrost-js-1";
        case 2:
                return "panfrost-js-2";
        default:
                return NULL;
        }
}

static const struct dma_fence_ops panfrost_fence_ops = {
        .get_driver_name = panfrost_fence_get_driver_name,
        .get_timeline_name = panfrost_fence_get_timeline_name,
};
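
/*
 * Only the two name callbacks are populated above; on this kernel the
 * remaining dma_fence_ops hooks (enable_signaling, signaled, wait,
 * release) may be left NULL, in which case the dma-fence core falls
 * back to its default behaviour.
 */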

static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
        struct panfrost_fence *fence;
        struct panfrost_job_slot *js = pfdev->js;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return ERR_PTR(-ENOMEM);

        fence->dev = pfdev->ddev;
        fence->queue = js_num;
        fence->seqno = ++js->queue[js_num].emit_seqno;
        dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
                       js->queue[js_num].fence_context, fence->seqno);

        return &fence->base;
}

static int panfrost_job_get_slot(struct panfrost_job *job)
{
        /* JS0: fragment jobs.
         * JS1: vertex/tiler jobs
         * JS2: compute jobs
         */
        if (job->requirements & PANFROST_JD_REQ_FS)
                return 0;

/* Not exposed to userspace yet */
#if 0
        if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
                if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
                    (job->pfdev->features.nr_core_groups == 2))
                        return 2;
                if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
                        return 2;
        }
#endif
        return 1;
}

static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
                                        u32 requirements,
                                        int js)
{
        u64 affinity;

        /*
         * Use all cores for now.
         * Eventually we may need to support tiler only jobs and h/w with
         * multiple (2) coherent core groups
         */
        affinity = pfdev->features.shader_present;

        job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
        job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
}
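
/*
 * The affinity is the 64-bit shader_present core bitmask split across
 * the 32-bit JS_AFFINITY_NEXT_LO/_HI register pair, i.e. one bit per
 * physical shader core, with all cores enabled for every job for now.
 */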

static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
        struct panfrost_device *pfdev = job->pfdev;
        u32 cfg;
        u64 jc_head = job->jc;
        int ret;

        ret = pm_runtime_get_sync(pfdev->dev);
        if (ret < 0)
                return;

        if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
                pm_runtime_put_sync_autosuspend(pfdev->dev);
                return;
        }

        cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);

        panfrost_devfreq_record_transition(pfdev, js);

        job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
        job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);

        panfrost_job_write_affinity(pfdev, job->requirements, js);

        /*
         * Start MMU, medium priority, cache clean/flush on end, clean/flush
         * on start.
         */
        cfg |= JS_CONFIG_THREAD_PRI(8) |
                JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
                JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;

        if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
                cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

        if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
                cfg |= JS_CONFIG_START_MMU;

        job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

        if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
                job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

        /* GO ! */
        dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx",
                                job, js, jc_head);

        job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
}
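
/*
 * Submission sketch: the head pointer, affinity, config and flush id are
 * all staged through the slot's _NEXT registers, and nothing reaches the
 * hardware until JS_COMMAND_START is written to JS_COMMAND_NEXT at the
 * very end.
 */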

static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
                                           int bo_count,
                                           struct dma_fence **implicit_fences)
{
        int i;

        for (i = 0; i < bo_count; i++)
                implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
}

static void panfrost_attach_object_fences(struct drm_gem_object **bos,
                                          int bo_count,
                                          struct dma_fence *fence)
{
        int i;

        for (i = 0; i < bo_count; i++)
                dma_resv_add_excl_fence(bos[i]->resv, fence);
}
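
/*
 * Implicit sync in two halves: acquire() snapshots each BO's current
 * exclusive fence as a dependency for the new job, and attach() installs
 * the job's render_done_fence as the new exclusive fence so that later
 * users of the same BOs wait for this job to finish.
 */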

int panfrost_job_push(struct panfrost_job *job)
{
        struct panfrost_device *pfdev = job->pfdev;
        int slot = panfrost_job_get_slot(job);
        struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
        struct ww_acquire_ctx acquire_ctx;
        int ret = 0;

        mutex_lock(&pfdev->sched_lock);

        ret = drm_gem_lock_reservations(job->bos, job->bo_count,
                                            &acquire_ctx);
        if (ret) {
                mutex_unlock(&pfdev->sched_lock);
                return ret;
        }

        ret = drm_sched_job_init(&job->base, entity, NULL);
        if (ret) {
                mutex_unlock(&pfdev->sched_lock);
                goto unlock;
        }

        job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

        kref_get(&job->refcount); /* put by scheduler job completion */

        panfrost_acquire_object_fences(job->bos, job->bo_count,
                                       job->implicit_fences);

        drm_sched_entity_push_job(&job->base, entity);

        mutex_unlock(&pfdev->sched_lock);

        panfrost_attach_object_fences(job->bos, job->bo_count,
                                      job->render_done_fence);

unlock:
        drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

        return ret;
}
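
/*
 * Note the ordering in panfrost_job_push(): the per-BO reservations stay
 * locked from the dependency snapshot until after render_done_fence has
 * been attached, so no other submitter can slip a fence in between the
 * two halves above.
 */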

static void panfrost_job_cleanup(struct kref *ref)
{
        struct panfrost_job *job = container_of(ref, struct panfrost_job,
                                                refcount);
        unsigned int i;

        if (job->in_fences) {
                for (i = 0; i < job->in_fence_count; i++)
                        dma_fence_put(job->in_fences[i]);
                kvfree(job->in_fences);
        }
        if (job->implicit_fences) {
                for (i = 0; i < job->bo_count; i++)
                        dma_fence_put(job->implicit_fences[i]);
                kvfree(job->implicit_fences);
        }
        dma_fence_put(job->done_fence);
        dma_fence_put(job->render_done_fence);

        if (job->bos) {
                for (i = 0; i < job->bo_count; i++)
                        drm_gem_object_put_unlocked(job->bos[i]);
                kvfree(job->bos);
        }

        kfree(job);
}

void panfrost_job_put(struct panfrost_job *job)
{
        kref_put(&job->refcount, panfrost_job_cleanup);
}

static void panfrost_job_free(struct drm_sched_job *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);

        drm_sched_job_cleanup(sched_job);

        panfrost_job_put(job);
}

static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
                                                 struct drm_sched_entity *s_entity)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);
        struct dma_fence *fence;
        unsigned int i;

        /* Explicit fences */
        for (i = 0; i < job->in_fence_count; i++) {
                if (job->in_fences[i]) {
                        fence = job->in_fences[i];
                        job->in_fences[i] = NULL;
                        return fence;
                }
        }

        /* Implicit fences, max. one per BO */
        for (i = 0; i < job->bo_count; i++) {
                if (job->implicit_fences[i]) {
                        fence = job->implicit_fences[i];
                        job->implicit_fences[i] = NULL;
                        return fence;
                }
        }

        return NULL;
}
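
/*
 * The scheduler core calls ->dependency repeatedly and waits on each
 * fence it gets back; only once this returns NULL is the job considered
 * runnable and handed to ->run_job. Consuming one fence per call and
 * clearing its slot is the expected contract here.
 */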

static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);
        struct panfrost_device *pfdev = job->pfdev;
        int slot = panfrost_job_get_slot(job);
        struct dma_fence *fence = NULL;

        if (unlikely(job->base.s_fence->finished.error))
                return NULL;

        pfdev->jobs[slot] = job;

        fence = panfrost_fence_create(pfdev, slot);
        if (IS_ERR(fence))
                return NULL;

        if (job->done_fence)
                dma_fence_put(job->done_fence);
        job->done_fence = dma_fence_get(fence);

        panfrost_job_hw_submit(job, slot);

        return fence;
}
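
/*
 * Two fences exist per job: done_fence, created here and signalled from
 * the interrupt handler when the slot completes, and render_done_fence,
 * the scheduler's finished fence that panfrost_job_push() attached to
 * the BOs.
 */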

void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
        int j;
        u32 irq_mask = 0;

        for (j = 0; j < NUM_JOB_SLOTS; j++)
                irq_mask |= MK_JS_MASK(j);

        job_write(pfdev, JOB_INT_CLEAR, irq_mask);
        job_write(pfdev, JOB_INT_MASK, irq_mask);
}

static void panfrost_job_timedout(struct drm_sched_job *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);
        struct panfrost_device *pfdev = job->pfdev;
        int js = panfrost_job_get_slot(job);
        unsigned long flags;
        int i;

        /*
         * If the GPU managed to complete this job's fence, the timeout is
         * spurious. Bail out.
         */
        if (dma_fence_is_signaled(job->done_fence))
                return;

        dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
                js,
                job_read(pfdev, JS_CONFIG(js)),
                job_read(pfdev, JS_STATUS(js)),
                job_read(pfdev, JS_HEAD_LO(js)),
                job_read(pfdev, JS_TAIL_LO(js)),
                sched_job);

        if (!mutex_trylock(&pfdev->reset_lock))
                return;

        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;

                drm_sched_stop(sched, sched_job);
                if (js != i)
                        /* Ensure any timeouts on other slots have finished */
                        cancel_delayed_work_sync(&sched->work_tdr);
        }

        drm_sched_increase_karma(sched_job);

        spin_lock_irqsave(&pfdev->js->job_lock, flags);
        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                if (pfdev->jobs[i]) {
                        pm_runtime_put_noidle(pfdev->dev);
                        pfdev->jobs[i] = NULL;
                }
        }
        spin_unlock_irqrestore(&pfdev->js->job_lock, flags);

        panfrost_devfreq_record_transition(pfdev, js);
        panfrost_device_reset(pfdev);

        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);

        /* Restart the schedulers after the GPU is usable again */
        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_start(&pfdev->js->queue[i].sched, true);

        mutex_unlock(&pfdev->reset_lock);
}
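
/*
 * Timeout recovery follows the usual drm_sched pattern: stop every
 * slot's scheduler, bump the karma of the offending job, drop the
 * runtime-PM references held by in-flight jobs, reset the GPU, then
 * resubmit and restart. The mutex_trylock() bails out early if another
 * slot's timeout handler already owns the reset.
 */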

static const struct drm_sched_backend_ops panfrost_sched_ops = {
        .dependency = panfrost_job_dependency,
        .run_job = panfrost_job_run,
        .timedout_job = panfrost_job_timedout,
        .free_job = panfrost_job_free
};

static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
        struct panfrost_device *pfdev = data;
        u32 status = job_read(pfdev, JOB_INT_STAT);
        int j;

        dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);

        if (!status)
                return IRQ_NONE;

        pm_runtime_mark_last_busy(pfdev->dev);

        for (j = 0; status; j++) {
                u32 mask = MK_JS_MASK(j);

                if (!(status & mask))
                        continue;

                job_write(pfdev, JOB_INT_CLEAR, mask);

                if (status & JOB_INT_MASK_ERR(j)) {
                        job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);

                        dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
                                j,
                                panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))),
                                job_read(pfdev, JS_HEAD_LO(j)),
                                job_read(pfdev, JS_TAIL_LO(j)));

                        drm_sched_fault(&pfdev->js->queue[j].sched);
                }

                if (status & JOB_INT_MASK_DONE(j)) {
                        struct panfrost_job *job;

                        spin_lock(&pfdev->js->job_lock);
                        job = pfdev->jobs[j];
                        /* Only NULL if job timeout occurred */
                        if (job) {
                                pfdev->jobs[j] = NULL;

                                panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
                                panfrost_devfreq_record_transition(pfdev, j);

                                dma_fence_signal_locked(job->done_fence);
                                pm_runtime_put_autosuspend(pfdev->dev);
                        }
                        spin_unlock(&pfdev->js->job_lock);
                }

                status &= ~mask;
        }

        return IRQ_HANDLED;
}
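
/*
 * The handler consumes JOB_INT_STAT one slot at a time: MK_JS_MASK(j)
 * covers the slot's interrupt bits (the _DONE and _ERR bits tested
 * above), each handled slot is acked via JOB_INT_CLEAR, and the loop
 * runs until the cached status word is empty. done_fence is signalled
 * under job_lock to close the race with the timeout path clearing
 * pfdev->jobs[j].
 */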

int panfrost_job_init(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js;
        int ret, j, irq;

        pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
        if (!js)
                return -ENOMEM;

        spin_lock_init(&js->job_lock);

        irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
        if (irq <= 0)
                return -ENODEV;

        ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
                               IRQF_SHARED, "job", pfdev);
        if (ret) {
                dev_err(pfdev->dev, "failed to request job irq");
                return ret;
        }

        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                js->queue[j].fence_context = dma_fence_context_alloc(1);

                ret = drm_sched_init(&js->queue[j].sched,
                                     &panfrost_sched_ops,
                                     1, 0, msecs_to_jiffies(500),
                                     "pan_js");
                if (ret) {
                        dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
                        goto err_sched;
                }
        }

        panfrost_job_enable_interrupts(pfdev);

        return 0;

err_sched:
        for (j--; j >= 0; j--)
                drm_sched_fini(&js->queue[j].sched);

        return ret;
}
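
/*
 * Each slot gets its own scheduler instance with hw_submission = 1, so
 * at most one job is in flight per slot, and a 500 ms per-job timeout
 * after which ->timedout_job fires.
 */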

void panfrost_job_fini(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js = pfdev->js;
        int j;

        job_write(pfdev, JOB_INT_MASK, 0);

        for (j = 0; j < NUM_JOB_SLOTS; j++)
                drm_sched_fini(&js->queue[j].sched);
}

int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
        struct panfrost_device *pfdev = panfrost_priv->pfdev;
        struct panfrost_job_slot *js = pfdev->js;
        struct drm_sched_rq *rq;
        int ret, i;

        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
                ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
                if (WARN_ON(ret))
                        return ret;
        }
        return 0;
}
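
/*
 * One scheduler entity per slot per file: jobs from the same file on a
 * given slot run in submission order, while entities from different
 * files compete through the slot's NORMAL-priority run queue.
 */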

void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
        int i;

        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
}

int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js = pfdev->js;
        int i;

        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                /* If there are any jobs in the HW queue, we're not idle */
                if (atomic_read(&js->queue[i].sched.hw_rq_count))
                        return false;

                /* Check whether the hardware is idle */
                if (pfdev->devfreq.slot[i].busy)
                        return false;
        }

        return true;
}