// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

#define JOB_TIMEOUT_MS 500

#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))
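
/* Each of the NUM_JOB_SLOTS hardware job slots gets its own DRM GPU
 * scheduler instance and fence timeline; the structures below hold that
 * per-slot state, plus the lock and IRQ shared by all slots.
 */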
struct panfrost_queue_state {
        struct drm_gpu_scheduler sched;
        u64 fence_context;
        u64 emit_seqno;
};

struct panfrost_job_slot {
        struct panfrost_queue_state queue[NUM_JOB_SLOTS];
        spinlock_t job_lock;
        int irq;
};

static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
        return container_of(sched_job, struct panfrost_job, base);
}

struct panfrost_fence {
        struct dma_fence base;
        struct drm_device *dev;
        /* panfrost seqno for signaled() test */
        u64 seqno;
        int queue;
};

static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
        return (struct panfrost_fence *)fence;
}

static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
        return "panfrost";
}

static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
        struct panfrost_fence *f = to_panfrost_fence(fence);

        switch (f->queue) {
        case 0:
                return "panfrost-js-0";
        case 1:
                return "panfrost-js-1";
        case 2:
                return "panfrost-js-2";
        default:
                return NULL;
        }
}

static const struct dma_fence_ops panfrost_fence_ops = {
        .get_driver_name = panfrost_fence_get_driver_name,
        .get_timeline_name = panfrost_fence_get_timeline_name,
};
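
/* A fence is created for each job in panfrost_job_run(). It registers
 * js->job_lock as its dma_fence lock, which is why the IRQ path below can
 * call dma_fence_signal_locked() while holding that lock. The parity of
 * the per-queue emit_seqno is reused by panfrost_get_job_chain_flag().
 */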
static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
        struct panfrost_fence *fence;
        struct panfrost_job_slot *js = pfdev->js;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return ERR_PTR(-ENOMEM);

        fence->dev = pfdev->ddev;
        fence->queue = js_num;
        fence->seqno = ++js->queue[js_num].emit_seqno;
        dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
                       js->queue[js_num].fence_context, fence->seqno);

        return &fence->base;
}

int panfrost_job_get_slot(struct panfrost_job *job)
{
        /* JS0: fragment jobs.
         * JS1: vertex/tiler jobs
         * JS2: compute jobs
         */
        if (job->requirements & PANFROST_JD_REQ_FS)
                return 0;

/* Not exposed to userspace yet */
#if 0
        if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
                if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
                    (job->pfdev->features.nr_core_groups == 2))
                        return 2;
                if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
                        return 2;
        }
#endif
        return 1;
}

static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
                                        u32 requirements,
                                        int js)
{
        u64 affinity;

        /*
         * Use all cores for now.
         * Eventually we may need to support tiler only jobs and h/w with
         * multiple (2) coherent core groups
         */
        affinity = pfdev->features.shader_present;

        job_write(pfdev, JS_AFFINITY_NEXT_LO(js), lower_32_bits(affinity));
        job_write(pfdev, JS_AFFINITY_NEXT_HI(js), upper_32_bits(affinity));
}
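
/* On hardware with HW_FEATURE_JOBCHAIN_DISAMBIGUATION, each job is tagged
 * with the parity of its fence seqno, so two consecutive jobs on a slot
 * carry different JS_CONFIG_JOB_CHAIN_FLAG values. That lets us target
 * HARD_STOP_0/HARD_STOP_1 at the right job (see panfrost_job_close()).
 */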
static u32
panfrost_get_job_chain_flag(const struct panfrost_job *job)
{
        struct panfrost_fence *f = to_panfrost_fence(job->done_fence);

        if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
                return 0;

        return (f->seqno & 1) ? JS_CONFIG_JOB_CHAIN_FLAG : 0;
}
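
/* pfdev->jobs[slot] mirrors the hardware queue: entry 0 is the job
 * currently executing, entry 1 the one staged in the _NEXT registers.
 * Both helpers below must be called with js->job_lock held.
 */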
static struct panfrost_job *
panfrost_dequeue_job(struct panfrost_device *pfdev, int slot)
{
        struct panfrost_job *job = pfdev->jobs[slot][0];

        WARN_ON(!job);
        pfdev->jobs[slot][0] = pfdev->jobs[slot][1];
        pfdev->jobs[slot][1] = NULL;

        return job;
}

static unsigned int
panfrost_enqueue_job(struct panfrost_device *pfdev, int slot,
                     struct panfrost_job *job)
{
        if (WARN_ON(!job))
                return 0;

        if (!pfdev->jobs[slot][0]) {
                pfdev->jobs[slot][0] = job;
                return 0;
        }

        WARN_ON(pfdev->jobs[slot][1]);
        pfdev->jobs[slot][1] = job;
        WARN_ON(panfrost_get_job_chain_flag(job) ==
                panfrost_get_job_chain_flag(pfdev->jobs[slot][0]));
        return 1;
}
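
/* Submission programs the _NEXT register set (head, affinity, config,
 * flush ID) first, and only then kicks the slot with JS_COMMAND_START,
 * under js->job_lock so a pending reset can't race with the start write.
 */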
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
        struct panfrost_device *pfdev = job->pfdev;
        unsigned int subslot;
        u32 cfg;
        u64 jc_head = job->jc;
        int ret;

        panfrost_devfreq_record_busy(&pfdev->pfdevfreq);

        ret = pm_runtime_get_sync(pfdev->dev);
        if (ret < 0)
                return;

        if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
                return;

        cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);

        job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
        job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));

        panfrost_job_write_affinity(pfdev, job->requirements, js);

        /* start MMU, medium priority, cache clean/flush on end, clean/flush on
         * start */
        cfg |= JS_CONFIG_THREAD_PRI(8) |
                JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
                JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE |
                panfrost_get_job_chain_flag(job);

        if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
                cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

        if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
                cfg |= JS_CONFIG_START_MMU;

        job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

        if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
                job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

        /* GO ! */

        spin_lock(&pfdev->js->job_lock);
        subslot = panfrost_enqueue_job(pfdev, js, job);
        /* Don't queue the job if a reset is in progress */
        if (!atomic_read(&pfdev->reset.pending)) {
                job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
                dev_dbg(pfdev->dev,
                        "JS: Submitting atom %p to js[%d][%d] with head=0x%llx AS %d",
                        job, js, subslot, jc_head, cfg & 0xf);
        }
        spin_unlock(&pfdev->js->job_lock);
}

static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
                                          int bo_count,
                                          struct drm_sched_job *job)
{
        int i, ret;

        for (i = 0; i < bo_count; i++) {
                /* panfrost always uses write mode in its current uapi */
                ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
                                                              true);
                if (ret)
                        return ret;
        }

        return 0;
}
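
/* Implicit sync, second half: the render-done fence is attached as the
 * exclusive (write) fence of every BO, matching the write-mode
 * dependencies acquired above.
 */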
static void panfrost_attach_object_fences(struct drm_gem_object **bos,
                                          int bo_count,
                                          struct dma_fence *fence)
{
        int i;

        for (i = 0; i < bo_count; i++)
                dma_resv_add_excl_fence(bos[i]->resv, fence);
}

int panfrost_job_push(struct panfrost_job *job)
{
        struct panfrost_device *pfdev = job->pfdev;
        struct ww_acquire_ctx acquire_ctx;
        int ret = 0;

        ret = drm_gem_lock_reservations(job->bos, job->bo_count,
                                        &acquire_ctx);
        if (ret)
                return ret;

        mutex_lock(&pfdev->sched_lock);
        drm_sched_job_arm(&job->base);

        job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

        ret = panfrost_acquire_object_fences(job->bos, job->bo_count,
                                             &job->base);
        if (ret) {
                mutex_unlock(&pfdev->sched_lock);
                goto unlock;
        }

        kref_get(&job->refcount); /* put by scheduler job completion */

        drm_sched_entity_push_job(&job->base);

        mutex_unlock(&pfdev->sched_lock);

        panfrost_attach_object_fences(job->bos, job->bo_count,
                                      job->render_done_fence);

unlock:
        drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

        return ret;
}

static void panfrost_job_cleanup(struct kref *ref)
{
        struct panfrost_job *job = container_of(ref, struct panfrost_job,
                                                refcount);
        unsigned int i;

        dma_fence_put(job->done_fence);
        dma_fence_put(job->render_done_fence);

        if (job->mappings) {
                for (i = 0; i < job->bo_count; i++) {
                        if (!job->mappings[i])
                                break;

                        atomic_dec(&job->mappings[i]->obj->gpu_usecount);
                        panfrost_gem_mapping_put(job->mappings[i]);
                }
                kvfree(job->mappings);
        }

        if (job->bos) {
                for (i = 0; i < job->bo_count; i++)
                        drm_gem_object_put(job->bos[i]);

                kvfree(job->bos);
        }

        kfree(job);
}

void panfrost_job_put(struct panfrost_job *job)
{
        kref_put(&job->refcount, panfrost_job_cleanup);
}

static void panfrost_job_free(struct drm_sched_job *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);

        drm_sched_job_cleanup(sched_job);

        panfrost_job_put(job);
}
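
/* drm_sched run_job() callback: returns the HW fence that the job IRQ
 * will signal once the slot reports this job as DONE (or failed).
 */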
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);
        struct panfrost_device *pfdev = job->pfdev;
        int slot = panfrost_job_get_slot(job);
        struct dma_fence *fence = NULL;

        if (unlikely(job->base.s_fence->finished.error))
                return NULL;

        /* Nothing to execute: can happen if the job has finished while
         * we were resetting the GPU.
         */
        if (!job->jc)
                return NULL;

        fence = panfrost_fence_create(pfdev, slot);
        if (IS_ERR(fence))
                return fence;

        if (job->done_fence)
                dma_fence_put(job->done_fence);
        job->done_fence = dma_fence_get(fence);

        panfrost_job_hw_submit(job, slot);

        return fence;
}

void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
        int j;
        u32 irq_mask = 0;

        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                irq_mask |= MK_JS_MASK(j);
        }

        job_write(pfdev, JOB_INT_CLEAR, irq_mask);
        job_write(pfdev, JOB_INT_MASK, irq_mask);
}

static void panfrost_job_handle_err(struct panfrost_device *pfdev,
                                    struct panfrost_job *job,
                                    unsigned int js)
{
        u32 js_status = job_read(pfdev, JS_STATUS(js));
        const char *exception_name = panfrost_exception_name(js_status);
        bool signal_fence = true;

        if (!panfrost_exception_is_fault(js_status)) {
                dev_dbg(pfdev->dev, "js event, js=%d, status=%s, head=0x%x, tail=0x%x",
                        js, exception_name,
                        job_read(pfdev, JS_HEAD_LO(js)),
                        job_read(pfdev, JS_TAIL_LO(js)));
        } else {
                dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
                        js, exception_name,
                        job_read(pfdev, JS_HEAD_LO(js)),
                        job_read(pfdev, JS_TAIL_LO(js)));
        }

        if (js_status == DRM_PANFROST_EXCEPTION_STOPPED) {
                /* Update the job head so we can resume */
                job->jc = job_read(pfdev, JS_TAIL_LO(js)) |
                          ((u64)job_read(pfdev, JS_TAIL_HI(js)) << 32);

                /* The job will be resumed, don't signal the fence */
                signal_fence = false;
        } else if (js_status == DRM_PANFROST_EXCEPTION_TERMINATED) {
                /* Job has been hard-stopped, flag it as canceled */
                dma_fence_set_error(job->done_fence, -ECANCELED);
                job->jc = 0;
        } else if (panfrost_exception_is_fault(js_status)) {
                /* We might want to provide finer-grained error code based on
                 * the exception type, but unconditionally setting to EINVAL
                 * is good enough for now.
                 */
                dma_fence_set_error(job->done_fence, -EINVAL);
                job->jc = 0;
        }

        panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
        panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

        if (signal_fence)
                dma_fence_signal_locked(job->done_fence);

        pm_runtime_put_autosuspend(pfdev->dev);

        if (panfrost_exception_needs_reset(pfdev, js_status)) {
                atomic_set(&pfdev->reset.pending, 1);
                drm_sched_fault(&pfdev->js->queue[js].sched);
        }
}

static void panfrost_job_handle_done(struct panfrost_device *pfdev,
                                     struct panfrost_job *job)
{
        /* Set ->jc to 0 to avoid re-submitting an already finished job (can
         * happen when we receive the DONE interrupt while doing a GPU reset).
         */
        job->jc = 0;
        panfrost_mmu_as_put(pfdev, job->file_priv->mmu);
        panfrost_devfreq_record_idle(&pfdev->pfdevfreq);

        dma_fence_signal_locked(job->done_fence);
        pm_runtime_put_autosuspend(pfdev->dev);
}
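
/* Called with js->job_lock held. The handler works in three phases:
 * collect done/failed jobs while draining JOB_INT_RAWSTAT, handle the
 * dequeued jobs, then deal with jobs that were waiting in the second
 * (_NEXT) slot behind a failure.
 */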
static void panfrost_job_handle_irq(struct panfrost_device *pfdev, u32 status)
{
        struct panfrost_job *done[NUM_JOB_SLOTS][2] = {};
        struct panfrost_job *failed[NUM_JOB_SLOTS] = {};
        u32 js_state = 0, js_events = 0;
        unsigned int i, j;

        /* First we collect all failed/done jobs. */
        while (status) {
                u32 js_state_mask = 0;

                for (j = 0; j < NUM_JOB_SLOTS; j++) {
                        if (status & MK_JS_MASK(j))
                                js_state_mask |= MK_JS_MASK(j);

                        if (status & JOB_INT_MASK_DONE(j)) {
                                if (done[j][0])
                                        done[j][1] = panfrost_dequeue_job(pfdev, j);
                                else
                                        done[j][0] = panfrost_dequeue_job(pfdev, j);
                        }

                        if (status & JOB_INT_MASK_ERR(j)) {
                                /* Cancel the next submission. Will be submitted
                                 * after we're done handling this failure if
                                 * there's no reset pending.
                                 */
                                job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);
                                failed[j] = panfrost_dequeue_job(pfdev, j);
                        }
                }

                /* JS_STATE is sampled when JOB_INT_CLEAR is written.
                 * For each BIT(slot) or BIT(slot + 16) bit written to
                 * JOB_INT_CLEAR, the corresponding bits in JS_STATE
                 * (BIT(slot) and BIT(slot + 16)) are updated, but this
                 * is racy. If we only have one job done at the time we
                 * read JOB_INT_RAWSTAT but the second job fails before we
                 * clear the status, we end up with a status containing
                 * only the DONE bit and consider both jobs as DONE since
                 * JS_STATE reports both NEXT and CURRENT as inactive.
                 * To prevent that, let's repeat these clear+read steps
                 * until status is 0.
                 */
                job_write(pfdev, JOB_INT_CLEAR, status);
                js_state &= ~js_state_mask;
                js_state |= job_read(pfdev, JOB_INT_JS_STATE) & js_state_mask;
                js_events |= status;
                status = job_read(pfdev, JOB_INT_RAWSTAT);
        }

        /* Then we handle the dequeued jobs. */
        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                if (!(js_events & MK_JS_MASK(j)))
                        continue;

                if (failed[j]) {
                        panfrost_job_handle_err(pfdev, failed[j], j);
                } else if (pfdev->jobs[j][0] && !(js_state & MK_JS_MASK(j))) {
                        /* When the current job doesn't fail, the JM dequeues
                         * the next job without waiting for an ACK, this means
                         * we can have 2 jobs dequeued and only catch the
                         * interrupt when the second one is done. If both slots
                         * are inactive, but one job remains in pfdev->jobs[j],
                         * consider it done. Of course that doesn't apply if a
                         * failure happened since we cancelled execution of the
                         * job in _NEXT (see above).
                         */
                        if (WARN_ON(!done[j][0]))
                                done[j][0] = panfrost_dequeue_job(pfdev, j);
                        if (pfdev->jobs[j][0])
                                done[j][1] = panfrost_dequeue_job(pfdev, j);
                }

                for (i = 0; i < ARRAY_SIZE(done[0]) && done[j][i]; i++)
                        panfrost_job_handle_done(pfdev, done[j][i]);
        }

        /* And finally we requeue jobs that were waiting in the second slot
         * and have been stopped if we detected a failure on the first slot.
         */
        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                if (!(js_events & MK_JS_MASK(j)))
                        continue;

                if (!failed[j] || !pfdev->jobs[j][0])
                        continue;

                if (pfdev->jobs[j][0]->jc == 0) {
                        /* The job was cancelled, signal the fence now */
                        struct panfrost_job *canceled = panfrost_dequeue_job(pfdev, j);

                        dma_fence_set_error(canceled->done_fence, -ECANCELED);
                        panfrost_job_handle_done(pfdev, canceled);
                } else if (!atomic_read(&pfdev->reset.pending)) {
                        /* Requeue the job we removed if no reset is pending */
                        job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_START);
                }
        }
}

static void panfrost_job_handle_irqs(struct panfrost_device *pfdev)
{
        u32 status = job_read(pfdev, JOB_INT_RAWSTAT);

        while (status) {
                pm_runtime_mark_last_busy(pfdev->dev);

                spin_lock(&pfdev->js->job_lock);
                panfrost_job_handle_irq(pfdev, status);
                spin_unlock(&pfdev->js->job_lock);
                status = job_read(pfdev, JOB_INT_RAWSTAT);
        }
}
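
/* Helper for the soft-stop poll loop in panfrost_reset(): a slot counts
 * as settled once JS_STATE reports it inactive. Slots with a new
 * interrupt pending in JOB_INT_RAWSTAT are dropped from the mask, since
 * panfrost_job_handle_irqs() will take care of them before the reset.
 */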
static u32 panfrost_active_slots(struct panfrost_device *pfdev,
                                 u32 *js_state_mask, u32 js_state)
{
        u32 rawstat;

        if (!(js_state & *js_state_mask))
                return 0;

        rawstat = job_read(pfdev, JOB_INT_RAWSTAT);
        if (rawstat) {
                unsigned int i;

                for (i = 0; i < NUM_JOB_SLOTS; i++) {
                        if (rawstat & MK_JS_MASK(i))
                                *js_state_mask &= ~MK_JS_MASK(i);
                }
        }

        return js_state & *js_state_mask;
}
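
/* Full reset sequence: stop the schedulers, mask and soft-stop the job
 * slots, drain remaining interrupts, rebalance the PM/devfreq counters
 * for stuck jobs, reset the GPU, then resubmit pending jobs and restart
 * the schedulers.
 */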
static void
panfrost_reset(struct panfrost_device *pfdev,
               struct drm_sched_job *bad)
{
        u32 js_state, js_state_mask = 0xffffffff;
        unsigned int i, j;
        bool cookie;
        int ret;

        if (!atomic_read(&pfdev->reset.pending))
                return;

        /* Stop the schedulers.
         *
         * FIXME: We temporarily get out of the dma_fence_signalling section
         * because the cleanup path generates lockdep splats when taking locks
         * to release job resources. We should rework the code to follow this
         * pattern:
         *
         *      try_lock
         *      if (locked)
         *              release
         *      else
         *              schedule_work_to_release_later
         */
        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_stop(&pfdev->js->queue[i].sched, bad);

        cookie = dma_fence_begin_signalling();

        if (bad)
                drm_sched_increase_karma(bad);

        /* Mask job interrupts and synchronize to make sure we won't be
         * interrupted during our reset.
         */
        job_write(pfdev, JOB_INT_MASK, 0);
        synchronize_irq(pfdev->js->irq);

        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                /* Cancel the next job and soft-stop the running job. */
                job_write(pfdev, JS_COMMAND_NEXT(i), JS_COMMAND_NOP);
                job_write(pfdev, JS_COMMAND(i), JS_COMMAND_SOFT_STOP);
        }

        /* Wait at most 10ms for soft-stops to complete */
        ret = readl_poll_timeout(pfdev->iomem + JOB_INT_JS_STATE, js_state,
                                 !panfrost_active_slots(pfdev, &js_state_mask, js_state),
                                 10, 10000);

        if (ret)
                dev_err(pfdev->dev, "Soft-stop failed\n");

        /* Handle the remaining interrupts before we reset. */
        panfrost_job_handle_irqs(pfdev);

        /* Remaining interrupts have been handled, but we might still have
         * stuck jobs. Let's make sure the PM counters stay balanced by
         * manually calling pm_runtime_put_noidle() and
         * panfrost_devfreq_record_idle() for each stuck job.
         */
        spin_lock(&pfdev->js->job_lock);
        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                for (j = 0; j < ARRAY_SIZE(pfdev->jobs[0]) && pfdev->jobs[i][j]; j++) {
                        pm_runtime_put_noidle(pfdev->dev);
                        panfrost_devfreq_record_idle(&pfdev->pfdevfreq);
                }
        }
        memset(pfdev->jobs, 0, sizeof(pfdev->jobs));
        spin_unlock(&pfdev->js->job_lock);

        /* Proceed with reset now. */
        panfrost_device_reset(pfdev);

        /* panfrost_device_reset() unmasks job interrupts, but we want to
         * keep them masked a bit longer.
         */
        job_write(pfdev, JOB_INT_MASK, 0);

        /* GPU has been reset, we can clear the reset pending bit. */
        atomic_set(&pfdev->reset.pending, 0);

        /* Now resubmit jobs that were previously queued but didn't have a
         * chance to finish.
         * FIXME: We temporarily get out of the DMA fence signalling section
         * while resubmitting jobs because the job submission logic will
         * allocate memory with the GFP_KERNEL flag which can trigger memory
         * reclaim and expose a lock ordering issue.
         */
        dma_fence_end_signalling(cookie);
        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
        cookie = dma_fence_begin_signalling();

        /* Restart the schedulers */
        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_start(&pfdev->js->queue[i].sched, true);

        /* Re-enable job interrupts now that everything has been restarted. */
        job_write(pfdev, JOB_INT_MASK,
                  GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
                  GENMASK(NUM_JOB_SLOTS - 1, 0));

        dma_fence_end_signalling(cookie);
}

static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
                                                     *sched_job)
{
        struct panfrost_job *job = to_panfrost_job(sched_job);
        struct panfrost_device *pfdev = job->pfdev;
        int js = panfrost_job_get_slot(job);

        /*
         * If the GPU managed to complete this job's fence, the timeout is
         * spurious. Bail out.
         */
        if (dma_fence_is_signaled(job->done_fence))
                return DRM_GPU_SCHED_STAT_NOMINAL;

        dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
                js,
                job_read(pfdev, JS_CONFIG(js)),
                job_read(pfdev, JS_STATUS(js)),
                job_read(pfdev, JS_HEAD_LO(js)),
                job_read(pfdev, JS_TAIL_LO(js)),
                sched_job);

        atomic_set(&pfdev->reset.pending, 1);
        panfrost_reset(pfdev, sched_job);

        return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void panfrost_reset_work(struct work_struct *work)
{
        struct panfrost_device *pfdev;

        pfdev = container_of(work, struct panfrost_device, reset.work);
        panfrost_reset(pfdev, NULL);
}

static const struct drm_sched_backend_ops panfrost_sched_ops = {
        .run_job = panfrost_job_run,
        .timedout_job = panfrost_job_timedout,
        .free_job = panfrost_job_free
};
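
/* Job interrupts are handled in two stages: the hard handler just masks
 * the interrupt and wakes the thread; the threaded handler does the real
 * work and re-enables the interrupt once it's done.
 */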
static irqreturn_t panfrost_job_irq_handler_thread(int irq, void *data)
{
        struct panfrost_device *pfdev = data;

        panfrost_job_handle_irqs(pfdev);
        job_write(pfdev, JOB_INT_MASK,
                  GENMASK(16 + NUM_JOB_SLOTS - 1, 16) |
                  GENMASK(NUM_JOB_SLOTS - 1, 0));
        return IRQ_HANDLED;
}

static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
        struct panfrost_device *pfdev = data;
        u32 status = job_read(pfdev, JOB_INT_STAT);

        if (!status)
                return IRQ_NONE;

        job_write(pfdev, JOB_INT_MASK, 0);
        return IRQ_WAKE_THREAD;
}

int panfrost_job_init(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js;
        unsigned int nentries = 2;
        int ret, j;

        /* All GPUs have two entries per queue, but without jobchain
         * disambiguation stopping the right job in the close path is tricky,
         * so let's just advertise one entry in that case.
         */
        if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION))
                nentries = 1;

        pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
        if (!js)
                return -ENOMEM;

        INIT_WORK(&pfdev->reset.work, panfrost_reset_work);
        spin_lock_init(&js->job_lock);

        js->irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
        if (js->irq <= 0)
                return -ENODEV;

        ret = devm_request_threaded_irq(pfdev->dev, js->irq,
                                        panfrost_job_irq_handler,
                                        panfrost_job_irq_handler_thread,
                                        IRQF_SHARED, KBUILD_MODNAME "-job",
                                        pfdev);
        if (ret) {
                dev_err(pfdev->dev, "failed to request job irq");
                return ret;
        }

        pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0);
        if (!pfdev->reset.wq)
                return -ENOMEM;

        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                js->queue[j].fence_context = dma_fence_context_alloc(1);

                ret = drm_sched_init(&js->queue[j].sched,
                                     &panfrost_sched_ops,
                                     nentries, 0,
                                     msecs_to_jiffies(JOB_TIMEOUT_MS),
                                     pfdev->reset.wq,
                                     NULL, "pan_js", pfdev->dev);
                if (ret) {
                        dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
                        goto err_sched;
                }
        }

        panfrost_job_enable_interrupts(pfdev);

        return 0;

err_sched:
        for (j--; j >= 0; j--)
                drm_sched_fini(&js->queue[j].sched);

        destroy_workqueue(pfdev->reset.wq);
        return ret;
}

void panfrost_job_fini(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js = pfdev->js;
        int j;

        job_write(pfdev, JOB_INT_MASK, 0);

        for (j = 0; j < NUM_JOB_SLOTS; j++) {
                drm_sched_fini(&js->queue[j].sched);
        }

        cancel_work_sync(&pfdev->reset.work);
        destroy_workqueue(pfdev->reset.wq);
}
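
/* Per-file scheduler entities are created on open and destroyed on close;
 * close also hard-stops any of the file's jobs still on the hardware,
 * using the job chain flag to pick the right HARD_STOP variant.
 */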
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
        struct panfrost_device *pfdev = panfrost_priv->pfdev;
        struct panfrost_job_slot *js = pfdev->js;
        struct drm_gpu_scheduler *sched;
        int ret, i;

        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                sched = &js->queue[i].sched;
                ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
                                            DRM_SCHED_PRIORITY_NORMAL, &sched,
                                            1, NULL);
                if (WARN_ON(ret))
                        return ret;
        }
        return 0;
}

void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
        struct panfrost_device *pfdev = panfrost_priv->pfdev;
        int i;

        for (i = 0; i < NUM_JOB_SLOTS; i++)
                drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);

        /* Kill in-flight jobs */
        spin_lock(&pfdev->js->job_lock);
        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i];
                int j;

                for (j = ARRAY_SIZE(pfdev->jobs[0]) - 1; j >= 0; j--) {
                        struct panfrost_job *job = pfdev->jobs[i][j];
                        u32 cmd;

                        if (!job || job->base.entity != entity)
                                continue;

                        if (j == 1) {
                                /* Try to cancel the job before it starts */
                                job_write(pfdev, JS_COMMAND_NEXT(i), JS_COMMAND_NOP);
                                /* Reset the job head so it doesn't get restarted if
                                 * the job in the first slot failed.
                                 */
                                job->jc = 0;
                        }

                        if (panfrost_has_hw_feature(pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) {
                                cmd = panfrost_get_job_chain_flag(job) ?
                                      JS_COMMAND_HARD_STOP_1 :
                                      JS_COMMAND_HARD_STOP_0;
                        } else {
                                cmd = JS_COMMAND_HARD_STOP;
                        }

                        job_write(pfdev, JS_COMMAND(i), cmd);
                }
        }
        spin_unlock(&pfdev->js->job_lock);
}

int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
        struct panfrost_job_slot *js = pfdev->js;
        int i;

        for (i = 0; i < NUM_JOB_SLOTS; i++) {
                /* If there are any jobs in the HW queue, we're not idle */
                if (atomic_read(&js->queue[i].sched.hw_rq_count))
                        return false;
        }

        return true;
}