/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects the
 * entities from the run queue using a FIFO. The scheduler provides dependency
 * handling features among jobs. The driver is supposed to provide callback
 * functions for backend operations to the scheduler, like submitting a job to
 * the hardware run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 */
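
/*
 * A minimal sketch of the driver-side flow, assuming a single hardware ring
 * and a hypothetical &struct drm_sched_backend_ops instance called
 * my_sched_ops (the names are illustrative only, not part of this file):
 *
 *	drm_sched_init(&ring->sched, &my_sched_ops, hw_submission, hang_limit,
 *		       msecs_to_jiffies(timeout_ms), NULL, ring->name);
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &ring->sched };
 *
 *	drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *			      sched_list, ARRAY_SIZE(sched_list), NULL);
 *
 *	drm_sched_job_init(&job->base, &ctx->entity, owner);
 *	drm_sched_entity_push_job(&job->base, &ctx->entity);
 */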
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)
/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
	rq->sched = sched;
}
/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);
	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}
/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);
	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}
/**
 * drm_sched_rq_select_entity - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {

		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}
/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 *
 * Finish the job's fence and wake up the worker thread.
 */
static void drm_sched_job_done(struct drm_sched_job *s_job)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(sched->score);

	trace_drm_sched_process_job(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}
/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: fence
 * @cb: fence callbacks
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

	drm_sched_job_done(s_job);
}
/**
 * drm_sched_dependency_optimized - test if the dependency can be optimized
 *
 * @fence: the dependency fence
 * @entity: the entity which depends on the above fence
 *
 * Returns true if the dependency can be optimized and false otherwise.
 */
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_fence *s_fence;

	if (!fence || dma_fence_is_signaled(fence))
		return false;
	if (fence->context == entity->fence_context)
		return true;
	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched)
		return true;

	return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);
/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->pending_list))
		schedule_delayed_work(&sched->work_tdr, sched->timeout);
}
/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	mod_delayed_work(system_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);
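
/*
 * As a sketch, a driver's fault interrupt handler (the hypothetical
 * my_fault_irq_handler below) would simply forward to drm_sched_fault() so
 * the timeout/reset path runs without waiting for the regular timeout:
 *
 *	static irqreturn_t my_fault_irq_handler(int irq, void *arg)
 *	{
 *		struct my_ring *ring = arg;
 *
 *		drm_sched_fault(&ring->sched);
 *		return IRQ_HANDLED;
 *	}
 */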
/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the timeout remaining.
 *
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);
/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
		unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->pending_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(system_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
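
/*
 * A rough sketch of how the suspend/resume timeout helpers are meant to pair
 * up, e.g. around a driver-initiated operation that must not trigger a TDR
 * (my_do_slow_hw_maintenance() is a hypothetical placeholder):
 *
 *	unsigned long remaining = drm_sched_suspend_timeout(&ring->sched);
 *
 *	my_do_slow_hw_maintenance(ring);
 *
 *	drm_sched_resume_timeout(&ring->sched, remaining);
 */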
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}
static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by a concurrent
		 * drm_sched_get_cleanup_job. It will be reinserted after
		 * sched->thread is parked, at which point it's safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		job->sched->ops->timedout_job(job);

		/*
		 * The guilty job did complete and hence needs to be manually
		 * removed. See the drm_sched_stop() documentation.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	spin_lock(&sched->job_list_lock);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}
/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of the time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty and
 * jobs from it will not be scheduled further.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	drm_sched_increase_karma_ext(bad, 1);
}
EXPORT_SYMBOL(drm_sched_increase_karma);

void drm_sched_reset_karma(struct drm_sched_job *bad)
{
	drm_sched_increase_karma_ext(bad, 0);
}
EXPORT_SYMBOL(drm_sched_reset_karma);
/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler, and also remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is the
 * caller's responsibility to release it manually if it is not part of the
 * pending list any more.
 *
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	kthread_park(sched->thread);

	/*
	 * Reinsert back the bad job here - now it's safe as
	 * drm_sched_get_cleanup_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from later to earlier one and either deactivate
	 * their HW callbacks or remove them from the pending list if they are
	 * already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * Remove the job from the pending_list.
			 * Locking here is for concurrent resume timeout.
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for the job's HW fence callback to finish using
			 * s_job before releasing it.
			 *
			 * The job is still alive, so the fence refcount is at
			 * least 1.
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep the bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start.
	 * This prevents the timeout work in progress from firing right away
	 * after this TDR finished and before the newly restarted jobs had a
	 * chance to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}

EXPORT_SYMBOL(drm_sched_stop);
/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 *
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recovers can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(s_job);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
		} else
			drm_sched_job_done(s_job);
	}

	if (full_recovery) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);
/**
 * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	drm_sched_resubmit_jobs_ext(sched, INT_MAX);
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
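
/*
 * Taken together, drm_sched_stop(), drm_sched_increase_karma(),
 * drm_sched_resubmit_jobs() and drm_sched_start() form the usual TDR
 * sequence. A rough sketch of what a driver's
 * &drm_sched_backend_ops.timedout_job callback does with them
 * (my_hw_reset() is a hypothetical placeholder for the actual hardware
 * reset):
 *
 *	drm_sched_stop(&ring->sched, bad_job);
 *	drm_sched_increase_karma(bad_job);
 *
 *	my_hw_reset(ring);
 *
 *	drm_sched_resubmit_jobs(&ring->sched);
 *	drm_sched_start(&ring->sched, true);
 */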
/**
 * drm_sched_resubmit_jobs_ext - helper to relaunch a certain number of jobs from the pending list
 *
 * @sched: scheduler instance
 * @max: number of jobs to relaunch
 *
 */
void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;
	int i = 0;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (i >= max)
			break;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		dma_fence_put(s_job->s_fence->parent);
		fence = sched->ops->run_job(s_job);
		i++;

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {
			s_job->s_fence->parent = fence;
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
/**
 * drm_sched_job_init - init a scheduler job
 *
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	struct drm_gpu_scheduler *sched;

	drm_sched_entity_select_rq(entity);
	if (!entity->rq)
		return -ENOENT;

	sched = entity->rq->sched;

	job->sched = sched;
	job->entity = entity;
	job->s_priority = entity->rq - sched->sched_rq;
	job->s_fence = drm_sched_fence_create(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;
	job->id = atomic64_inc_return(&sched->job_id_count);

	INIT_LIST_HEAD(&job->list);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 *
 * @job: scheduler job to clean up
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	dma_fence_put(&job->s_fence->finished);
	job->s_fence = NULL;
}
EXPORT_SYMBOL(drm_sched_job_cleanup);
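
/*
 * As a sketch of the expected pairing: drm_sched_job_init() is called before
 * the job is pushed to an entity, and drm_sched_job_cleanup() is called from
 * the driver's &drm_sched_backend_ops.free_job callback once the scheduler is
 * done with the job (my_job/to_my_job are hypothetical driver wrappers around
 * &struct drm_sched_job):
 *
 *	static void my_free_job(struct drm_sched_job *sched_job)
 *	{
 *		struct my_job *job = to_my_job(sched_job);
 *
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(job);
 *	}
 */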
/**
 * drm_sched_ready - is the scheduler ready
 *
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}
/**
 * drm_sched_wakeup - Wake up the scheduler when it is ready
 *
 * @sched: scheduler instance
 *
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}
/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}
/**
 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * ready for it to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job;

	/*
	 * Don't destroy jobs while the timeout worker is running OR while the
	 * thread is being parked and hence assumed not to touch pending_list.
	 */
	if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !cancel_delayed_work(&sched->work_tdr)) ||
	    kthread_should_park())
		return NULL;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove job from pending_list */
		list_del_init(&job->list);
	} else {
		job = NULL;
		/* queue timeout for next job */
		drm_sched_start_timeout(sched);
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}
/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns a pointer to the sched with the least load, or NULL if none of the
 * drm_gpu_schedulers are ready.
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		     unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_score = UINT_MAX, num_score;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_score = atomic_read(sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);
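
/*
 * drm_sched_pick_best() is what load balancing across several schedulers of
 * the same type builds on: an entity initialized with more than one scheduler
 * in its sched_list is placed on the least loaded one when it is idle. A
 * hedged sketch, assuming a hypothetical driver with two equivalent rings:
 *
 *	struct drm_gpu_scheduler *sched_list[] = {
 *		&ring0->sched, &ring1->sched,
 *	};
 *
 *	drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *			      sched_list, ARRAY_SIZE(sched_list), NULL);
 */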
/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}
/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;
		struct drm_sched_job *cleanup_job = NULL;

		wait_event_interruptible(sched->wake_up_worker,
					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (cleanup_job) {
			sched->ops->free_job(cleanup_job);
			/* queue timeout for next job */
			drm_sched_start_timeout(sched);
		}

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);
		if (!sched_job) {
			complete(&entity->entity_idle);
			continue;
		}

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		trace_drm_run_job(sched_job, entity);
		fence = sched->ops->run_job(sched_job);
		drm_sched_fence_scheduled(s_fence);

		if (!IS_ERR_OR_NULL(fence)) {
			s_fence->parent = dma_fence_get(fence);
			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(sched_job);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n",
					  r);
			dma_fence_put(fence);
		} else {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			drm_sched_job_done(sched_job);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}
/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @score: optional score atomic shared with other schedulers
 * @name: name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission, unsigned hang_limit, long timeout,
		   atomic_t *score, const char *name)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->hang_limit = hang_limit;
	sched->score = score ? score : &sched->_score;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->_score, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);
/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);
/**
 * drm_sched_increase_karma_ext - Update sched_entity guilty flag
 *
 * @bad: The job guilty of the time out
 * @type: 1 to increase karma, 0 to reset it
 *
 */
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* Don't change @bad's karma if it's from the KERNEL RQ, because a GPU
	 * hang can sometimes corrupt kernel jobs (like VM updating jobs), but
	 * kernel jobs are always considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		if (type == 0)
			atomic_set(&bad->karma, 0);
		else if (type == 1)
			atomic_inc(&bad->karma);

		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (entity->guilty)
						atomic_set(entity->guilty, type);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma_ext);