drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

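/*
 * Scheduler timeout callback: first try a per-ring soft recovery; if that is
 * not possible (or GPU recovery is disabled via amdgpu_gpu_recovery), log the
 * fence state and the offending process, then either trigger a full GPU reset
 * or suspend the timeout and, on SR-IOV VFs, set the TDR debug flag.
 */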
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
        struct amdgpu_job *job = to_amdgpu_job(s_job);
        struct amdgpu_task_info ti;
        struct amdgpu_device *adev = ring->adev;

        memset(&ti, 0, sizeof(struct amdgpu_task_info));

        if (amdgpu_gpu_recovery &&
            amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
                DRM_ERROR("ring %s timeout, but soft recovered\n",
                          s_job->sched->name);
                return DRM_GPU_SCHED_STAT_NOMINAL;
        }

        amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
        DRM_ERROR("ring %s timeout, signaled seq=%u, emitted seq=%u\n",
                  job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
                  ring->fence_drv.sync_seq);
        DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
                  ti.process_name, ti.tgid, ti.task_name, ti.pid);

        if (amdgpu_device_should_recover_gpu(ring->adev)) {
                amdgpu_device_gpu_recover(ring->adev, job);
                return DRM_GPU_SCHED_STAT_NOMINAL;
        } else {
                drm_sched_suspend_timeout(&ring->sched);
                if (amdgpu_sriov_vf(adev))
                        adev->virt.tdr_debug = true;
                return DRM_GPU_SCHED_STAT_NOMINAL;
        }
}

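/*
 * Allocate a job together with trailing space for @num_ibs IBs, create its
 * sync objects and snapshot the VRAM lost counter; the caller fills in the
 * IBs and then submits or frees the job.
 */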
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
        size_t size = sizeof(struct amdgpu_job);

        if (num_ibs == 0)
                return -EINVAL;

        size += sizeof(struct amdgpu_ib) * num_ibs;

        *job = kzalloc(size, GFP_KERNEL);
        if (!*job)
                return -ENOMEM;

        /*
         * Initialize the scheduler to at least some ring so that we always
         * have a pointer to adev.
         */
        (*job)->base.sched = &adev->rings[0]->sched;
        (*job)->vm = vm;
        (*job)->ibs = (void *)&(*job)[1];
        (*job)->num_ibs = num_ibs;

        amdgpu_sync_create(&(*job)->sync);
        amdgpu_sync_create(&(*job)->sched_sync);
        (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
        (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;

        return 0;
}

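/* Convenience wrapper: allocate a VM-less job with a single IB of @size bytes
 * taken from the given IB pool. */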
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                enum amdgpu_ib_pool_type pool_type,
                struct amdgpu_job **job)
{
        int r;

        r = amdgpu_job_alloc(adev, 1, job, NULL);
        if (r)
                return r;

        r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
        if (r)
                kfree(*job);

        return r;
}

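/* Release the IBs of a job, fencing them against the job's finished fence
 * (or the HW fence when no scheduler fence exists). */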
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
        struct dma_fence *f;
        unsigned i;

        /* use sched fence if available */
        f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

        for (i = 0; i < job->num_ibs; ++i)
                amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}

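/* Scheduler free_job callback: drop the scheduler bookkeeping, the HW fence
 * reference and the job itself. */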
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
        struct amdgpu_job *job = to_amdgpu_job(s_job);

        drm_sched_job_cleanup(s_job);

        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}

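/* Free a job without going through the scheduler, e.g. on an error path or
 * after direct submission. */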
void amdgpu_job_free(struct amdgpu_job *job)
{
        amdgpu_job_free_resources(job);

        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}

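/*
 * Push a job to a scheduler entity. On success the caller gets a reference
 * to the job's finished fence in @f and must not touch the job afterwards;
 * the scheduler owns it from here on.
 */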
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
                      void *owner, struct dma_fence **f)
{
        int r;

        if (!f)
                return -EINVAL;

        r = drm_sched_job_init(&job->base, entity, owner);
        if (r)
                return r;

        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
        drm_sched_entity_push_job(&job->base, entity);

        return 0;
}

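/*
 * Bypass the scheduler and write the job's IBs to the ring immediately,
 * returning the HW fence; used where no scheduler entity is available.
 */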
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
                             struct dma_fence **fence)
{
        int r;

        job->base.sched = &ring->sched;
        r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
        job->fence = dma_fence_get(*fence);
        if (r)
                return r;

        amdgpu_job_free(job);
        return 0;
}

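/*
 * Scheduler dependency callback: return the next fence this job still has to
 * wait for. Once all sync dependencies are consumed, grab a VMID for the
 * job's VM, which may add further fences to wait on.
 */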
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
                                               struct drm_sched_entity *s_entity)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;
        struct dma_fence *fence;
        int r;

        fence = amdgpu_sync_get_fence(&job->sync);
        if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
                r = amdgpu_sync_fence(&job->sched_sync, fence);
                if (r)
                        DRM_ERROR("Error adding fence (%d)\n", r);
        }

        while (fence == NULL && vm && !job->vmid) {
                r = amdgpu_vmid_grab(vm, ring, &job->sync,
                                     &job->base.s_fence->finished,
                                     job);
                if (r)
                        DRM_ERROR("Error getting VM ID (%d)\n", r);

                fence = amdgpu_sync_get_fence(&job->sync);
        }

        return fence;
}

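/*
 * Scheduler run_job callback: write the job's IBs to the ring and return the
 * HW fence. The IBs are skipped when VRAM was lost since the job was created
 * or when an error has already been set on the finished fence.
 */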
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
        struct dma_fence *fence = NULL, *finished;
        struct amdgpu_job *job;
        int r = 0;

        job = to_amdgpu_job(sched_job);
        finished = &job->base.s_fence->finished;

        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

        trace_amdgpu_sched_run_job(job);

        if (job->vram_lost_counter != atomic_read(&ring->adev->vram_lost_counter))
                dma_fence_set_error(finished, -ECANCELED); /* skip IB as well if VRAM lost */

        if (finished->error < 0) {
                DRM_INFO("Skip scheduling IBs!\n");
        } else {
                r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
                                       &fence);
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
        }
        /* if gpu reset, hw fence will be replaced here */
        dma_fence_put(job->fence);
        job->fence = dma_fence_get(fence);

        amdgpu_job_free_resources(job);

        fence = r ? ERR_PTR(r) : fence;
        return fence;
}

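/* Local helper to get back to the drm_sched_job from the node popped off an
 * entity's spsc job queue. */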
#define to_drm_sched_job(sched_job)             \
                container_of((sched_job), struct drm_sched_job, queue_node)

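/*
 * Force-complete everything on a scheduler: pop all jobs still queued on its
 * entities and signal their fences with -EHWPOISON, then signal the finished
 * fences of jobs already pushed to the hardware.
 */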
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
        struct drm_sched_job *s_job;
        struct drm_sched_entity *s_entity = NULL;
        int i;

        /* Signal all jobs not yet scheduled */
        for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
                struct drm_sched_rq *rq = &sched->sched_rq[i];

                if (!rq)
                        continue;

                spin_lock(&rq->lock);
                list_for_each_entry(s_entity, &rq->entities, list) {
                        while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
                                struct drm_sched_fence *s_fence = s_job->s_fence;

                                dma_fence_signal(&s_fence->scheduled);
                                dma_fence_set_error(&s_fence->finished, -EHWPOISON);
                                dma_fence_signal(&s_fence->finished);
                        }
                }
                spin_unlock(&rq->lock);
        }

        /* Signal all jobs already scheduled to HW */
        list_for_each_entry(s_job, &sched->pending_list, list) {
                struct drm_sched_fence *s_fence = s_job->s_fence;

                dma_fence_set_error(&s_fence->finished, -EHWPOISON);
                dma_fence_signal(&s_fence->finished);
        }
}

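/* Hook the callbacks above into the GPU scheduler. */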
const struct drm_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
        .timedout_job = amdgpu_job_timedout,
        .free_job = amdgpu_job_free_cb
};