drivers/gpu/drm/etnaviv/etnaviv_sched.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/moduleparam.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"

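/*
 * Scheduler tunables, exposed as read-only module parameters: job_hang_limit
 * is how many timeouts a job may trigger before it is marked guilty,
 * hw_job_limit caps the number of jobs queued to the hardware at once.
 */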
static int etnaviv_job_hang_limit;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

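/*
 * Scheduler dependency callback: return the next unsignaled fence this job
 * still has to wait for, or NULL once the optional in-fence and all per-BO
 * reservation fences have signaled.
 */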
static struct dma_fence *
etnaviv_sched_dependency(struct drm_sched_job *sched_job,
			 struct drm_sched_entity *entity)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence;
	int i;

	if (unlikely(submit->in_fence)) {
		fence = submit->in_fence;
		submit->in_fence = NULL;

		if (!dma_fence_is_signaled(fence))
			return fence;

		dma_fence_put(fence);
	}

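	/*
	 * Hand back any unsignaled reservation fences (exclusive and shared)
	 * of the BOs referenced by this submit.
	 */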
	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
		int j;

		if (bo->excl) {
			fence = bo->excl;
			bo->excl = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}

		for (j = 0; j < bo->nr_shared; j++) {
			if (!bo->shared[j])
				continue;

			fence = bo->shared[j];
			bo->shared[j] = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}
		kfree(bo->shared);
		bo->nr_shared = 0;
		bo->shared = NULL;
	}

	return NULL;
}

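/*
 * Called by the scheduler once all dependencies have signaled: hand the job
 * to the hardware, unless its finished fence already carries an error from an
 * earlier recovery, in which case the job is skipped.
 */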
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence = NULL;

	if (likely(!sched_job->s_fence->finished.error))
		fence = etnaviv_gpu_submit(submit);
	else
		dev_dbg(submit->gpu->dev, "skipping bad job\n");

	return fence;
}

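/*
 * Timeout handler: decide whether the GPU is really stuck and, if so, dump
 * its state, reset the core and resubmit the remaining jobs.
 */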
static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr;
	int change;

	/* block scheduler */
	drm_sched_stop(&gpu->sched, sched_job);

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(submit->out_fence))
		goto out_no_timeout;

	/*
	 * If the GPU is still making forward progress on the front-end (which
	 * should never loop) we shift out the timeout to give it a chance to
	 * finish the job.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
	if (change < 0 || change > 16) {
		gpu->hangcheck_dma_addr = dma_addr;
		goto out_no_timeout;
	}

	if (sched_job)
		drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(submit);
	etnaviv_gpu_recover_hang(gpu);

	drm_sched_resubmit_jobs(&gpu->sched);

out_no_timeout:
	/* restart scheduler after GPU is usable again */
	drm_sched_start(&gpu->sched, true);
}

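/* Scheduler is done with the job: release the scheduler's submit reference. */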
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

	drm_sched_job_cleanup(sched_job);

	etnaviv_submit_put(submit);
}

static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.dependency = etnaviv_sched_dependency,
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};

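/*
 * Queue a submit on the scheduler entity of its context. Takes an extra
 * reference on the submit which is dropped again in etnaviv_sched_free_job().
 */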
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
			   struct etnaviv_gem_submit *submit)
{
	int ret = 0;

	/*
	 * Hold the fence lock across the whole operation to avoid jobs being
	 * pushed out of order with regard to their sched fence seqnos as
	 * allocated in drm_sched_job_init.
	 */
	mutex_lock(&submit->gpu->fence_lock);

	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
				 submit->ctx);
	if (ret)
		goto out_unlock;

	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
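	/*
	 * Allocate an id for the finished fence so userspace can refer to
	 * this submit by its fence id later on.
	 */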
	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
						submit->out_fence, 0,
						INT_MAX, GFP_KERNEL);
	if (submit->out_fence_id < 0) {
		drm_sched_job_cleanup(&submit->sched_job);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* the scheduler holds on to the job now */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job, sched_entity);

out_unlock:
	mutex_unlock(&submit->gpu->fence_lock);

	return ret;
}

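/*
 * Set up one scheduler instance per GPU core. The 500 msec value is the job
 * timeout after which etnaviv_sched_timedout_job() is invoked.
 */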
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			     msecs_to_jiffies(500), dev_name(gpu->dev));
	if (ret)
		return ret;

	return 0;
}

void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}