drivers/gpu/drm/etnaviv/etnaviv_sched.c
/*
 * Copyright (C) 2017 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kthread.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"

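/*
 * Module parameters: job_hang_limit sets how many times a job may be blamed
 * for a timeout before the scheduler gives up on it, hw_job_limit caps the
 * number of jobs queued to the hardware at once.
 */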
static int etnaviv_job_hang_limit;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

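/*
 * Scheduler dependency callback: return the next unsignaled fence the job
 * still has to wait for, one fence per invocation, or NULL once the job has
 * no remaining dependencies and may be run.
 */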
static struct dma_fence *
etnaviv_sched_dependency(struct drm_sched_job *sched_job,
                         struct drm_sched_entity *entity)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct dma_fence *fence;
        int i;

        /* Wait on the explicit in-fence from userspace first, if any. */
        if (unlikely(submit->in_fence)) {
                fence = submit->in_fence;
                submit->in_fence = NULL;

                if (!dma_fence_is_signaled(fence))
                        return fence;

                dma_fence_put(fence);
        }

        /* Then walk the implicit fences attached to each BO in the submit. */
        for (i = 0; i < submit->nr_bos; i++) {
                struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
                int j;

                if (bo->excl) {
                        fence = bo->excl;
                        bo->excl = NULL;

                        if (!dma_fence_is_signaled(fence))
                                return fence;

                        dma_fence_put(fence);
                }

                for (j = 0; j < bo->nr_shared; j++) {
                        if (!bo->shared[j])
                                continue;

                        fence = bo->shared[j];
                        bo->shared[j] = NULL;

                        if (!dma_fence_is_signaled(fence))
                                return fence;

                        dma_fence_put(fence);
                }
                kfree(bo->shared);
                bo->nr_shared = 0;
                bo->shared = NULL;
        }

        /* All dependencies have signaled, the job is ready to run. */
        return NULL;
}

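/*
 * Scheduler run_job callback: hand the job to the hardware, unless an
 * earlier error has already been propagated into the scheduler fence.
 */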
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct dma_fence *fence = NULL;

        if (likely(!sched_job->s_fence->finished.error))
                fence = etnaviv_gpu_submit(submit);
        else
                dev_dbg(submit->gpu->dev, "skipping bad job\n");

        return fence;
}

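/*
 * Scheduler timeout callback: a job has run past its timeout, so park the
 * scheduler thread, dump and reset the GPU, then resubmit the pending jobs.
 */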
static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct etnaviv_gpu *gpu = submit->gpu;

        /* block scheduler */
        kthread_park(gpu->sched.thread);
        drm_sched_hw_job_reset(&gpu->sched, sched_job);

        /* get the GPU back into the init state */
        etnaviv_core_dump(gpu);
        etnaviv_gpu_recover_hang(gpu);

        /* restart scheduler after GPU is usable again */
        drm_sched_job_recovery(&gpu->sched);
        kthread_unpark(gpu->sched.thread);
}

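/*
 * Scheduler free_job callback: drop the reference the scheduler took on the
 * submit in etnaviv_sched_push_job().
 */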
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

        etnaviv_submit_put(submit);
}

static const struct drm_sched_backend_ops etnaviv_sched_ops = {
        .dependency = etnaviv_sched_dependency,
        .run_job = etnaviv_sched_run_job,
        .timedout_job = etnaviv_sched_timedout_job,
        .free_job = etnaviv_sched_free_job,
};

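/*
 * Queue a submit for execution: initialize the scheduler job, publish its
 * finished fence to userspace through the fence IDR, and push the job to
 * the scheduler entity.
 */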
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
                           struct etnaviv_gem_submit *submit)
{
        int ret;

        ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
                                 sched_entity, submit->cmdbuf.ctx);
        if (ret)
                return ret;

        submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
        mutex_lock(&submit->gpu->fence_idr_lock);
        submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
                                                submit->out_fence, 0,
                                                INT_MAX, GFP_KERNEL);
        mutex_unlock(&submit->gpu->fence_idr_lock);
        if (submit->out_fence_id < 0)
                return -ENOMEM;

        /* the scheduler holds on to the job now */
        kref_get(&submit->refcount);

        drm_sched_entity_push_job(&submit->sched_job, sched_entity);

        return 0;
}

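/*
 * Create the scheduler run queue for one GPU core, with a fixed job timeout
 * of 500 ms.
 */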
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
        return drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
                              etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
                              msecs_to_jiffies(500), dev_name(gpu->dev));
}

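/* Tear down the scheduler when the GPU core goes away. */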
void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
        drm_sched_fini(&gpu->sched);
}