drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

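/*
 * amdgpu_job_timedout - scheduler timeout callback
 *
 * Called by the DRM GPU scheduler when a job's hardware fence has not
 * signaled within the ring's timeout: logs the last signaled and last
 * emitted fence sequence numbers, then kicks off GPU recovery.
 */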
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
        struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

        DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
                  job->base.sched->name,
                  atomic_read(&job->ring->fence_drv.last_seq),
                  job->ring->fence_drv.sync_seq);

        amdgpu_device_gpu_recover(job->adev, job, false);
}

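/*
 * amdgpu_job_alloc - allocate a job with room for @num_ibs indirect buffers
 *
 * The job and its IB array are allocated as one block; (*job)->ibs points
 * just past the job structure. The current vram_lost_counter is sampled so
 * amdgpu_job_run() can later cancel the job if VRAM contents were lost in
 * the meantime. Returns -EINVAL for num_ibs == 0 and -ENOMEM if the
 * allocation fails.
 */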
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
                     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
        size_t size = sizeof(struct amdgpu_job);

        if (num_ibs == 0)
                return -EINVAL;

        size += sizeof(struct amdgpu_ib) * num_ibs;

        *job = kzalloc(size, GFP_KERNEL);
        if (!*job)
                return -ENOMEM;

        (*job)->adev = adev;
        (*job)->vm = vm;
        (*job)->ibs = (void *)&(*job)[1];
        (*job)->num_ibs = num_ibs;

        amdgpu_sync_create(&(*job)->sync);
        amdgpu_sync_create(&(*job)->sched_sync);
        (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);

        return 0;
}

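/*
 * amdgpu_job_alloc_with_ib - allocate a single-IB job with @size bytes of
 * IB space, as used for kernel-internal submissions without a VM.
 *
 * A minimal usage sketch (error handling elided; the ring, entity, owner
 * and fence variables are assumptions of the example, not defined here):
 *
 *	struct amdgpu_job *job;
 *	struct dma_fence *fence;
 *
 *	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
 *	... fill job->ibs[0] with command packets ...
 *	r = amdgpu_job_submit(job, ring, &entity, owner, &fence);
 */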
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
                             struct amdgpu_job **job)
{
        int r;

        r = amdgpu_job_alloc(adev, 1, job, NULL);
        if (r)
                return r;

        r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
        if (r)
                kfree(*job);
        else
                (*job)->vm_pd_addr = adev->gart.table_addr;

        return r;
}

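/*
 * amdgpu_job_free_resources - release the IBs attached to @job
 *
 * Each IB is freed against the scheduler's finished fence when one exists,
 * falling back to the hardware fence, so the IB memory is only reused once
 * the job has actually completed.
 */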
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
        struct dma_fence *f;
        unsigned i;

        /* use sched fence if available */
        f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

        for (i = 0; i < job->num_ibs; ++i)
                amdgpu_ib_free(job->adev, &job->ibs[i], f);
}

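/*
 * amdgpu_job_free_cb - scheduler free callback
 *
 * Invoked by the scheduler once it is done with the job: drops the ring
 * priority reference taken at submit time, releases the hardware fence and
 * the sync objects, and frees the job itself.
 */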
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
        struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

        amdgpu_ring_priority_put(job->ring, s_job->s_priority);
        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}

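/*
 * amdgpu_job_free - free a job that was never pushed to the scheduler
 *
 * Counterpart to amdgpu_job_free_cb() for error paths: releases the IBs,
 * the hardware fence and the sync objects directly, since no scheduler
 * callback will ever run for this job.
 */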
void amdgpu_job_free(struct amdgpu_job *job)
{
        amdgpu_job_free_resources(job);

        dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
        kfree(job);
}

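/*
 * amdgpu_job_submit - hand @job to the scheduler through @entity
 *
 * On success *@f holds a reference to the scheduler's finished fence and
 * ownership of the job passes to the scheduler. The IB backing memory is
 * released here already because it is freed against that finished fence.
 */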
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
                      struct drm_sched_entity *entity, void *owner,
                      struct dma_fence **f)
{
        int r;

        job->ring = ring;

        if (!f)
                return -EINVAL;

        r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
        if (r)
                return r;

        job->owner = owner;
        job->fence_ctx = entity->fence_context;
        *f = dma_fence_get(&job->base.s_fence->finished);
        amdgpu_job_free_resources(job);
        amdgpu_ring_priority_get(job->ring, job->base.s_priority);
        drm_sched_entity_push_job(&job->base, entity);

        return 0;
}

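/*
 * amdgpu_job_dependency - scheduler dependency callback
 *
 * Returns the next fence the job still has to wait for before it can run.
 * An explicit dependency that the scheduler has already optimized away is
 * remembered in sched_sync, so amdgpu_ib_schedule() can still emit the
 * required pipeline synchronization. Once all sync fences are consumed, a
 * VMID is grabbed for VM jobs, which may add further fences to wait on.
 */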
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
                                               struct drm_sched_entity *s_entity)
{
        struct amdgpu_job *job = to_amdgpu_job(sched_job);
        struct amdgpu_vm *vm = job->vm;
        struct dma_fence *fence;
        bool explicit = false;
        int r;

        fence = amdgpu_sync_get_fence(&job->sync, &explicit);
        if (fence && explicit) {
                if (drm_sched_dependency_optimized(fence, s_entity)) {
                        r = amdgpu_sync_fence(job->adev, &job->sched_sync,
                                              fence, false);
                        if (r)
                                DRM_ERROR("Error adding fence to sync (%d)\n", r);
                }
        }

        while (fence == NULL && vm && !job->vmid) {
                struct amdgpu_ring *ring = job->ring;

                r = amdgpu_vmid_grab(vm, ring, &job->sync,
                                     &job->base.s_fence->finished,
                                     job);
                if (r)
                        DRM_ERROR("Error getting VM ID (%d)\n", r);

                fence = amdgpu_sync_get_fence(&job->sync, NULL);
        }

        return fence;
}

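/*
 * amdgpu_job_run - scheduler run callback
 *
 * Submits the job's IBs to its ring and returns the hardware fence for the
 * scheduler to wait on. If VRAM contents were lost since the job was
 * allocated, the finished fence is flagged with -ECANCELED and the IBs are
 * skipped instead of being scheduled.
 */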
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
        struct dma_fence *fence = NULL, *finished;
        struct amdgpu_device *adev;
        struct amdgpu_job *job;
        int r;

        if (!sched_job) {
                DRM_ERROR("job is null\n");
                return NULL;
        }
        job = to_amdgpu_job(sched_job);
        finished = &job->base.s_fence->finished;
        adev = job->adev;

        BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

        trace_amdgpu_sched_run_job(job);

        /* skip the IBs as well if VRAM contents were lost */
        if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
                dma_fence_set_error(finished, -ECANCELED);

        if (finished->error < 0) {
                DRM_INFO("Skip scheduling IBs!\n");
        } else {
                r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
                                       &fence);
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
        }
        /* if the GPU was reset, the hw fence is replaced here */
        dma_fence_put(job->fence);
        job->fence = dma_fence_get(fence);

        amdgpu_job_free_resources(job);
        return fence;
}

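/* The callbacks the DRM GPU scheduler invokes on amdgpu jobs. */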
const struct drm_sched_backend_ops amdgpu_sched_ops = {
        .dependency = amdgpu_job_dependency,
        .run_job = amdgpu_job_run,
        .timedout_job = amdgpu_job_timedout,
        .free_job = amdgpu_job_free_cb
};