/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects the
 * entities from the run queue using a FIFO. The scheduler provides dependency
 * handling features among jobs. The driver is supposed to provide callback
 * functions for backend operations to the scheduler, like submitting a job to
 * the hardware run queue, returning the dependencies of a job, etc.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
 *    the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they were
 * pushed.
 */
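
/*
 * A minimal usage sketch of the flow above. This is a hedged illustration,
 * not a definitive implementation: "my_ops", "my_run_job" and friends are
 * hypothetical driver names, and error handling is omitted.
 *
 *	static const struct drm_sched_backend_ops my_ops = {
 *		.dependency = my_dependency,		// optional
 *		.run_job = my_run_job,			// push a job to the hw ring
 *		.timedout_job = my_timedout_job,	// hang recovery
 *		.free_job = my_free_job,
 *	};
 *
 *	drm_sched_init(&sched, &my_ops, 4, 2, msecs_to_jiffies(1000), "my-ring");
 *	drm_sched_entity_init(&entity, rq_list, num_rq_list, NULL);
 *	drm_sched_job_init(&job, &entity, owner);
 *	drm_sched_entity_push_job(&job, &entity);
 */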

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)             \
                container_of((sched_job), struct drm_sched_job, queue_node)

static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance the run queue belongs to
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
                              struct drm_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
        rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
                             struct drm_sched_entity *entity)
{
        if (!list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
                                struct drm_sched_entity *entity)
{
        if (list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity; returns NULL if none is found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
        struct drm_sched_entity *entity;

        spin_lock(&rq->lock);

        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        if (drm_sched_entity_is_ready(entity)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return entity;
                        }
                }
        }

        list_for_each_entry(entity, &rq->entities, list) {

                if (drm_sched_entity_is_ready(entity)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return entity;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}

/**
 * drm_sched_dependency_optimized - test if the dependency can be optimized
 *
 * @fence: the dependency fence
 * @entity: the entity which depends on the above fence
 *
 * Returns true if the dependency can be optimized and false otherwise.
 */
bool drm_sched_dependency_optimized(struct dma_fence *fence,
                                    struct drm_sched_entity *entity)
{
        struct drm_gpu_scheduler *sched = entity->rq->sched;
        struct drm_sched_fence *s_fence;

        if (!fence || dma_fence_is_signaled(fence))
                return false;
        if (fence->context == entity->fence_context)
                return true;
        s_fence = to_drm_sched_fence(fence);
        if (s_fence && s_fence->sched == sched)
                return true;

        return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);
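
/*
 * Hedged illustration of the intent: when the dependency comes from the
 * same scheduler instance (or the entity's own context), execution on the
 * ring is already serialized, so a driver may skip an explicit pipeline
 * sync. The call site below is a hypothetical driver snippet, not part of
 * this file:
 *
 *	if (drm_sched_dependency_optimized(fence, entity))
 *		return 0;	// no extra hardware sync required
 */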

/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
            !list_empty(&sched->ring_mirror_list))
                schedule_delayed_work(&sched->work_tdr, sched->timeout);
}

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
        mod_delayed_work(system_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);

/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrary large value,
 * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
 * called from an IRQ context.
 *
 * Returns the remaining timeout.
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
        unsigned long sched_timeout, now = jiffies;

        sched_timeout = sched->work_tdr.timer.expires;

        /*
         * Modify the timeout to an arbitrarily large value. This also prevents
         * the timeout from being restarted when new submissions arrive.
         */
        if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
                        && time_after(sched_timeout, now))
                return sched_timeout - now;
        else
                return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler. Note that
 * this function can be called from an IRQ context.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
                unsigned long remaining)
{
        unsigned long flags;

        spin_lock_irqsave(&sched->job_list_lock, flags);

        if (list_empty(&sched->ring_mirror_list))
                cancel_delayed_work(&sched->work_tdr);
        else
                mod_delayed_work(system_wq, &sched->work_tdr, remaining);

        spin_unlock_irqrestore(&sched->job_list_lock, flags);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
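
/*
 * Hedged usage sketch: a driver that knows the hardware will temporarily
 * make no progress (e.g. around a preemption or world-switch window) can
 * bracket that window so the TDR timer does not fire spuriously. The
 * surrounding driver code is assumed, not part of this file:
 *
 *	unsigned long remaining = drm_sched_suspend_timeout(sched);
 *	... window in which no job can complete ...
 *	drm_sched_resume_timeout(sched, remaining);
 */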

/* drm_sched_job_finish is called after the hw fence is signaled */
static void drm_sched_job_finish(struct work_struct *work)
{
        struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
                                                   finish_work);
        struct drm_gpu_scheduler *sched = s_job->sched;
        unsigned long flags;

        /*
         * Canceling the timeout without removing our job from the ring mirror
         * list is safe, as we will only end up in this worker if our job's
         * finished fence has been signaled. So even if another worker
         * manages to find this job as the next job in the list, the fence
         * signaled check below will prevent the timeout from being restarted.
         */
        cancel_delayed_work_sync(&sched->work_tdr);

        spin_lock_irqsave(&sched->job_list_lock, flags);
        /* queue TDR for next job */
        drm_sched_start_timeout(sched);
        spin_unlock_irqrestore(&sched->job_list_lock, flags);

        sched->ops->free_job(s_job);
}

static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
        struct drm_gpu_scheduler *sched = s_job->sched;
        unsigned long flags;

        spin_lock_irqsave(&sched->job_list_lock, flags);
        list_add_tail(&s_job->node, &sched->ring_mirror_list);
        drm_sched_start_timeout(sched);
        spin_unlock_irqrestore(&sched->job_list_lock, flags);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
        struct drm_gpu_scheduler *sched;
        struct drm_sched_job *job;
        unsigned long flags;

        sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
        job = list_first_entry_or_null(&sched->ring_mirror_list,
                                       struct drm_sched_job, node);

        if (job)
                job->sched->ops->timedout_job(job);

        spin_lock_irqsave(&sched->job_list_lock, flags);
        drm_sched_start_timeout(sched);
        spin_unlock_irqrestore(&sched->job_list_lock, flags);
}

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of the timeout
 *
 * Increments karma on every hang caused by the 'bad' job. If this exceeds the
 * hang limit of the scheduler then the respective sched entity is marked
 * guilty and no further jobs from it will be scheduled.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
        int i;
        struct drm_sched_entity *tmp;
        struct drm_sched_entity *entity;
        struct drm_gpu_scheduler *sched = bad->sched;

        /* don't increase @bad's karma if it's from the KERNEL RQ,
         * because a GPU hang can corrupt kernel jobs (like VM updating jobs),
         * but keep in mind that kernel jobs are always considered good.
         */
        if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
                atomic_inc(&bad->karma);
                for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
                     i++) {
                        struct drm_sched_rq *rq = &sched->sched_rq[i];

                        spin_lock(&rq->lock);
                        list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
                                if (bad->s_fence->scheduled.context ==
                                    entity->fence_context) {
                                        if (atomic_read(&bad->karma) >
                                            bad->sched->hang_limit)
                                                if (entity->guilty)
                                                        atomic_set(entity->guilty, 1);
                                        break;
                                }
                        }
                        spin_unlock(&rq->lock);
                        if (&entity->list != &rq->entities)
                                break;
                }
        }
}
EXPORT_SYMBOL(drm_sched_increase_karma);

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 *
 * Park the scheduler thread and detach the hardware fence callbacks of all
 * jobs still on the mirror list.
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched)
{
        struct drm_sched_job *s_job;
        unsigned long flags;
        struct dma_fence *last_fence = NULL;

        kthread_park(sched->thread);

        /*
         * Verify all the signaled jobs in the mirror list are removed from the
         * ring by waiting for the latest job to enter the list. This should
         * ensure that all the previous jobs that were in flight have already
         * signaled and been removed from the list.
         */
        spin_lock_irqsave(&sched->job_list_lock, flags);
        list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
                if (s_job->s_fence->parent &&
                    dma_fence_remove_callback(s_job->s_fence->parent,
                                              &s_job->cb)) {
                        dma_fence_put(s_job->s_fence->parent);
                        s_job->s_fence->parent = NULL;
                        atomic_dec(&sched->hw_rq_count);
                } else {
                        last_fence = dma_fence_get(&s_job->s_fence->finished);
                        break;
                }
        }
        spin_unlock_irqrestore(&sched->job_list_lock, flags);

        if (last_fence) {
                dma_fence_wait(last_fence, false);
                dma_fence_put(last_fence);
        }
}
EXPORT_SYMBOL(drm_sched_stop);

/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
        struct drm_sched_job *s_job, *tmp;
        int r;

        if (!full_recovery)
                goto unpark;

        /*
         * Locking the list is not required here as the sched thread is parked
         * so no new jobs are being pushed to the HW, and in drm_sched_stop we
         * flushed all the jobs that were still in the mirror list but had
         * already signaled and removed themselves from the list. Also,
         * concurrent GPU recoveries can't run in parallel.
         */
        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                struct dma_fence *fence = s_job->s_fence->parent;

                if (fence) {
                        r = dma_fence_add_callback(fence, &s_job->cb,
                                                   drm_sched_process_job);
                        if (r == -ENOENT)
                                drm_sched_process_job(fence, &s_job->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                } else
                        drm_sched_process_job(NULL, &s_job->cb);
        }

        drm_sched_start_timeout(sched);

unpark:
        kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);

/**
 * drm_sched_resubmit_jobs - helper to relaunch jobs from the mirror ring list
 *
 * @sched: scheduler instance
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
        struct drm_sched_job *s_job, *tmp;
        uint64_t guilty_context;
        bool found_guilty = false;

        /* TODO: do we need a spinlock here? */
        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                struct drm_sched_fence *s_fence = s_job->s_fence;

                if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
                        found_guilty = true;
                        guilty_context = s_job->s_fence->scheduled.context;
                }

                if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
                        dma_fence_set_error(&s_fence->finished, -ECANCELED);

                s_job->s_fence->parent = sched->ops->run_job(s_job);
                atomic_inc(&sched->hw_rq_count);
        }
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
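
/*
 * Taken together with drm_sched_stop() and drm_sched_start(), a typical
 * recovery sequence in a driver's timedout_job callback might look like
 * the following. This is a hedged sketch; the hardware reset step and the
 * "bad_job" name are driver-specific assumptions:
 *
 *	drm_sched_stop(sched);
 *	drm_sched_increase_karma(bad_job);
 *	... reset the hardware ring ...
 *	drm_sched_resubmit_jobs(sched);
 *	drm_sched_start(sched, true);
 */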

/**
 * drm_sched_job_init - init a scheduler job
 *
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
                       struct drm_sched_entity *entity,
                       void *owner)
{
        struct drm_gpu_scheduler *sched;

        drm_sched_entity_select_rq(entity);
        if (!entity->rq)
                return -ENOENT;

        sched = entity->rq->sched;

        job->sched = sched;
        job->entity = entity;
        job->s_priority = entity->rq - sched->sched_rq;
        job->s_fence = drm_sched_fence_create(entity, owner);
        if (!job->s_fence)
                return -ENOMEM;
        job->id = atomic64_inc_return(&sched->job_id_count);

        INIT_WORK(&job->finish_work, drm_sched_job_finish);
        INIT_LIST_HEAD(&job->node);

        return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
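
/*
 * Hedged sketch of the expected call sequence; "my_job" and the embedding
 * of a struct drm_sched_job as its "base" member are assumptions for the
 * sake of illustration:
 *
 *	r = drm_sched_job_init(&my_job->base, entity, owner);
 *	if (r)
 *		return r;
 *	... gather dependencies, pin buffers, etc. ...
 *	drm_sched_entity_push_job(&my_job->base, entity);
 *
 * If an error occurs after init but before the job is pushed, release the
 * fence with drm_sched_job_cleanup() below.
 */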

/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 *
 * @job: scheduler job to clean up
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
        dma_fence_put(&job->s_fence->finished);
        job->s_fence = NULL;
}
EXPORT_SYMBOL(drm_sched_job_cleanup);

/**
 * drm_sched_ready - is the scheduler ready
 *
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup - Wake up the scheduler when it is ready
 *
 * @sched: scheduler instance
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
        if (drm_sched_ready(sched))
                wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none is found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
        struct drm_sched_entity *entity;
        int i;

        if (!drm_sched_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
                entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
                if (entity)
                        break;
        }

        return entity;
}

/**
 * drm_sched_process_job - process a job
 *
 * @f: fence
 * @cb: fence callbacks
 *
 * Called after job has finished execution.
 */
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
        struct drm_sched_fence *s_fence = s_job->s_fence;
        struct drm_gpu_scheduler *sched = s_fence->sched;
        unsigned long flags;

        cancel_delayed_work(&sched->work_tdr);

        atomic_dec(&sched->hw_rq_count);
        atomic_dec(&sched->num_jobs);

        spin_lock_irqsave(&sched->job_list_lock, flags);
        /* remove job from ring_mirror_list */
        list_del_init(&s_job->node);
        spin_unlock_irqrestore(&sched->job_list_lock, flags);

        drm_sched_fence_finished(s_fence);

        trace_drm_sched_process_job(s_fence);
        wake_up_interruptible(&sched->wake_up_worker);

        schedule_work(&s_job->finish_work);
}

/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
        if (kthread_should_park()) {
                kthread_parkme();
                return true;
        }

        return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
        struct sched_param sparam = {.sched_priority = 1};
        struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
        int r;

        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct drm_sched_entity *entity = NULL;
                struct drm_sched_fence *s_fence;
                struct drm_sched_job *sched_job;
                struct dma_fence *fence;

                wait_event_interruptible(sched->wake_up_worker,
                                         (!drm_sched_blocked(sched) &&
                                          (entity = drm_sched_select_entity(sched))) ||
                                         kthread_should_stop());

                if (!entity)
                        continue;

                sched_job = drm_sched_entity_pop_job(entity);
                if (!sched_job)
                        continue;

                s_fence = sched_job->s_fence;

                atomic_inc(&sched->hw_rq_count);
                drm_sched_job_begin(sched_job);

                fence = sched->ops->run_job(sched_job);
                drm_sched_fence_scheduled(s_fence);

                if (fence) {
                        s_fence->parent = dma_fence_get(fence);
                        r = dma_fence_add_callback(fence, &sched_job->cb,
                                                   drm_sched_process_job);
                        if (r == -ENOENT)
                                drm_sched_process_job(fence, &sched_job->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        dma_fence_put(fence);
                } else
                        drm_sched_process_job(NULL, &sched_job->cb);

                wake_up(&sched->job_scheduled);
        }
        return 0;
}

/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @name: name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const struct drm_sched_backend_ops *ops,
                   unsigned hw_submission,
                   unsigned hang_limit,
                   long timeout,
                   const char *name)
{
        int i, ret;

        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
        sched->timeout = timeout;
        sched->hang_limit = hang_limit;
        for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
                drm_sched_rq_init(sched, &sched->sched_rq[i]);

        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
        INIT_LIST_HEAD(&sched->ring_mirror_list);
        spin_lock_init(&sched->job_list_lock);
        atomic_set(&sched->hw_rq_count, 0);
        INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
        atomic_set(&sched->num_jobs, 0);
        atomic64_set(&sched->job_id_count, 0);

        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(drm_sched_main, sched, sched->name);
        if (IS_ERR(sched->thread)) {
                ret = PTR_ERR(sched->thread);
                sched->thread = NULL;
                DRM_ERROR("Failed to create scheduler for %s.\n", name);
                return ret;
        }

        sched->ready = true;
        return 0;
}
EXPORT_SYMBOL(drm_sched_init);
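
/*
 * Hedged example of pairing init with fini, typically one scheduler per
 * hardware ring. The "ring" structure and the parameter values below are
 * illustrative assumptions:
 *
 *	r = drm_sched_init(&ring->sched, &my_ops, 16, 2,
 *			   msecs_to_jiffies(2000), ring->name);
 *	if (r)
 *		return r;
 *	...
 *	drm_sched_fini(&ring->sched);
 */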

/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
        if (sched->thread)
                kthread_stop(sched->thread);

        sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);