drm/amdgpu: add check for callback
[linux-2.6-microblaze.git] drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1 /*
2  * Copyright 2008 Jerome Glisse.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22  * DEALINGS IN THE SOFTWARE.
23  *
24  * Authors:
25  *    Jerome Glisse <glisse@freedesktop.org>
26  */
27 #include <linux/list_sort.h>
28 #include <drm/drmP.h>
29 #include <drm/amdgpu_drm.h>
30 #include "amdgpu.h"
31 #include "amdgpu_trace.h"
32
33 #define AMDGPU_CS_MAX_PRIORITY          32u
34 #define AMDGPU_CS_NUM_BUCKETS           (AMDGPU_CS_MAX_PRIORITY + 1)
35
36 /* This is based on bucket sort, which has O(n) time complexity.
37  * An item with priority "i" is added to bucket[i]. The lists are then
38  * concatenated in descending order.
39  */
40 struct amdgpu_cs_buckets {
41         struct list_head bucket[AMDGPU_CS_NUM_BUCKETS];
42 };
43
44 static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser,
45                                   int error, bool backoff);
46 static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff);
47 static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser);
48
49 static void amdgpu_cs_buckets_init(struct amdgpu_cs_buckets *b)
50 {
51         unsigned i;
52
53         for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++)
54                 INIT_LIST_HEAD(&b->bucket[i]);
55 }
56
57 static void amdgpu_cs_buckets_add(struct amdgpu_cs_buckets *b,
58                                   struct list_head *item, unsigned priority)
59 {
60         /* Since buffers which appear sooner in the relocation list are
61          * likely to be used more often than buffers which appear later
62          * in the list, the sort mustn't change the ordering of buffers
63          * with the same priority, i.e. it must be stable.
64          */
65         list_add_tail(item, &b->bucket[min(priority, AMDGPU_CS_MAX_PRIORITY)]);
66 }
67
68 static void amdgpu_cs_buckets_get_list(struct amdgpu_cs_buckets *b,
69                                        struct list_head *out_list)
70 {
71         unsigned i;
72
73         /* Connect the sorted buckets in the output list. */
74         for (i = 0; i < AMDGPU_CS_NUM_BUCKETS; i++) {
75                 list_splice(&b->bucket[i], out_list);
76         }
77 }
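/*
 * Illustrative sketch (not part of the original file): the same stable
 * bucket-sort idea as amdgpu_cs_buckets above, written as a self-contained
 * userspace program with plain singly linked lists instead of the kernel's
 * list_head, so it can be compiled and run on its own. All names below are
 * hypothetical; buckets are concatenated from the highest index down, as in
 * the comment above.
 */
#include <stdio.h>

#define NUM_BUCKETS 4

struct item {
        const char *name;
        unsigned priority;
        struct item *next;
};

static struct item *buckets[NUM_BUCKETS];
static struct item *tails[NUM_BUCKETS];

/* appending keeps insertion order inside a bucket, i.e. the sort stays stable */
static void bucket_add(struct item *it)
{
        unsigned p = it->priority < NUM_BUCKETS ? it->priority : NUM_BUCKETS - 1;

        it->next = NULL;
        if (tails[p])
                tails[p]->next = it;
        else
                buckets[p] = it;
        tails[p] = it;
}

int main(void)
{
        struct item items[] = {
                { "a", 2 }, { "b", 0 }, { "c", 2 }, { "d", 1 },
        };
        unsigned i;
        int p;

        for (i = 0; i < sizeof(items) / sizeof(items[0]); i++)
                bucket_add(&items[i]);

        /* concatenate buckets from highest to lowest priority */
        for (p = NUM_BUCKETS - 1; p >= 0; p--)
                for (struct item *it = buckets[p]; it; it = it->next)
                        printf("%s (prio %u)\n", it->name, it->priority);

        return 0;
}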
78
79 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
80                        u32 ip_instance, u32 ring,
81                        struct amdgpu_ring **out_ring)
82 {
83         /* Right now all IPs have only one instance, with multiple rings. */
84         if (ip_instance != 0) {
85                 DRM_ERROR("invalid ip instance: %d\n", ip_instance);
86                 return -EINVAL;
87         }
88
89         switch (ip_type) {
90         default:
91                 DRM_ERROR("unknown ip type: %d\n", ip_type);
92                 return -EINVAL;
93         case AMDGPU_HW_IP_GFX:
94                 if (ring < adev->gfx.num_gfx_rings) {
95                         *out_ring = &adev->gfx.gfx_ring[ring];
96                 } else {
97                         DRM_ERROR("only %d gfx rings are supported now\n",
98                                   adev->gfx.num_gfx_rings);
99                         return -EINVAL;
100                 }
101                 break;
102         case AMDGPU_HW_IP_COMPUTE:
103                 if (ring < adev->gfx.num_compute_rings) {
104                         *out_ring = &adev->gfx.compute_ring[ring];
105                 } else {
106                         DRM_ERROR("only %d compute rings are supported now\n",
107                                   adev->gfx.num_compute_rings);
108                         return -EINVAL;
109                 }
110                 break;
111         case AMDGPU_HW_IP_DMA:
112                 if (ring < 2) {
113                         *out_ring = &adev->sdma[ring].ring;
114                 } else {
115                         DRM_ERROR("only two SDMA rings are supported\n");
116                         return -EINVAL;
117                 }
118                 break;
119         case AMDGPU_HW_IP_UVD:
120                 *out_ring = &adev->uvd.ring;
121                 break;
122         case AMDGPU_HW_IP_VCE:
123         if (ring < 2) {
124                         *out_ring = &adev->vce.ring[ring];
125                 } else {
126                         DRM_ERROR("only two VCE rings are supported\n");
127                         return -EINVAL;
128                 }
129                 break;
130         }
131         return 0;
132 }
133
134 static void amdgpu_job_work_func(struct work_struct *work)
135 {
136         struct amdgpu_cs_parser *sched_job =
137                 container_of(work, struct amdgpu_cs_parser,
138                              job_work);
139         mutex_lock(&sched_job->job_lock);
140         if (sched_job->free_job)
141                 sched_job->free_job(sched_job);
142         mutex_unlock(&sched_job->job_lock);
143         /* after processing job, free memory */
144         kfree(sched_job);
145 }
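/*
 * Illustrative sketch (not part of the original file): the pattern this
 * commit adds in amdgpu_job_work_func above - an optional callback is only
 * invoked when it has actually been installed, so jobs that never set
 * free_job do not dereference a NULL pointer. Self-contained userspace
 * example; all names are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

struct job {
        void (*free_job)(struct job *job);
        void *payload;
};

static void job_complete(struct job *job)
{
        /* mirrors the "if (sched_job->free_job)" guard above */
        if (job->free_job)
                job->free_job(job);
        free(job);
}

static void drop_payload(struct job *job)
{
        printf("freeing payload %p\n", job->payload);
        free(job->payload);
}

int main(void)
{
        struct job *a = calloc(1, sizeof(*a));
        struct job *b = calloc(1, sizeof(*b));

        a->payload = malloc(16);
        a->free_job = drop_payload;     /* has a callback */
        /* b deliberately leaves free_job NULL */

        job_complete(a);
        job_complete(b);                /* safe because of the NULL check */
        return 0;
}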
146 struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
147                                                struct drm_file *filp,
148                                                struct amdgpu_ctx *ctx,
149                                                struct amdgpu_ib *ibs,
150                                                uint32_t num_ibs)
151 {
152         struct amdgpu_cs_parser *parser;
153         int i;
154
155         parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
156         if (!parser)
157                 return NULL;
158
159         parser->adev = adev;
160         parser->filp = filp;
161         parser->ctx = ctx;
162         parser->ibs = ibs;
163         parser->num_ibs = num_ibs;
164         if (amdgpu_enable_scheduler) {
165                 mutex_init(&parser->job_lock);
166                 INIT_WORK(&parser->job_work, amdgpu_job_work_func);
167         }
168         for (i = 0; i < num_ibs; i++)
169                 ibs[i].ctx = ctx;
170
171         return parser;
172 }
173
174 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
175 {
176         union drm_amdgpu_cs *cs = data;
177         uint64_t *chunk_array_user;
178         uint64_t *chunk_array = NULL;
179         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
180         struct amdgpu_bo_list *bo_list = NULL;
181         unsigned size, i;
182         int r = 0;
183
184         if (!cs->in.num_chunks)
185                 goto out;
186
187         p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
188         if (!p->ctx) {
189                 r = -EINVAL;
190                 goto out;
191         }
192         bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
193         if (bo_list && !bo_list->has_userptr) {
194                 p->bo_list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
195                 if (!p->bo_list)
196                         return -ENOMEM;
197                 amdgpu_bo_list_copy(p->adev, p->bo_list, bo_list);
198                 amdgpu_bo_list_put(bo_list);
199         } else if (bo_list && bo_list->has_userptr)
200                 p->bo_list = bo_list;
201         else
202                 p->bo_list = NULL;
203
204         /* get chunks */
205         INIT_LIST_HEAD(&p->validated);
206         chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
207         if (chunk_array == NULL) {
208                 r = -ENOMEM;
209                 goto out;
210         }
211
212         chunk_array_user = (uint64_t __user *)(cs->in.chunks);
213         if (copy_from_user(chunk_array, chunk_array_user,
214                            sizeof(uint64_t)*cs->in.num_chunks)) {
215                 r = -EFAULT;
216                 goto out;
217         }
218
219         p->nchunks = cs->in.num_chunks;
220         p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
221                             GFP_KERNEL);
222         if (p->chunks == NULL) {
223                 r = -ENOMEM;
224                 goto out;
225         }
226
227         for (i = 0; i < p->nchunks; i++) {
228                 struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
229                 struct drm_amdgpu_cs_chunk user_chunk;
230                 uint32_t __user *cdata;
231
232                 chunk_ptr = (void __user *)chunk_array[i];
233                 if (copy_from_user(&user_chunk, chunk_ptr,
234                                        sizeof(struct drm_amdgpu_cs_chunk))) {
235                         r = -EFAULT;
236                         goto out;
237                 }
238                 p->chunks[i].chunk_id = user_chunk.chunk_id;
239                 p->chunks[i].length_dw = user_chunk.length_dw;
240
241                 size = p->chunks[i].length_dw;
242                 cdata = (void __user *)user_chunk.chunk_data;
243                 p->chunks[i].user_ptr = cdata;
244
245                 p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
246                 if (p->chunks[i].kdata == NULL) {
247                         r = -ENOMEM;
248                         goto out;
249                 }
250                 size *= sizeof(uint32_t);
251                 if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
252                         r = -EFAULT;
253                         goto out;
254                 }
255
256                 switch (p->chunks[i].chunk_id) {
257                 case AMDGPU_CHUNK_ID_IB:
258                         p->num_ibs++;
259                         break;
260
261                 case AMDGPU_CHUNK_ID_FENCE:
262                         size = sizeof(struct drm_amdgpu_cs_chunk_fence);
263                         if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
264                                 uint32_t handle;
265                                 struct drm_gem_object *gobj;
266                                 struct drm_amdgpu_cs_chunk_fence *fence_data;
267
268                                 fence_data = (void *)p->chunks[i].kdata;
269                                 handle = fence_data->handle;
270                                 gobj = drm_gem_object_lookup(p->adev->ddev,
271                                                              p->filp, handle);
272                                 if (gobj == NULL) {
273                                         r = -EINVAL;
274                                         goto out;
275                                 }
276
277                                 p->uf.bo = gem_to_amdgpu_bo(gobj);
278                                 p->uf.offset = fence_data->offset;
279                         } else {
280                                 r = -EINVAL;
281                                 goto out;
282                         }
283                         break;
284
285                 case AMDGPU_CHUNK_ID_DEPENDENCIES:
286                         break;
287
288                 default:
289                         r = -EINVAL;
290                         goto out;
291                 }
292         }
293
294
295         p->ibs = kmalloc_array(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
296         if (!p->ibs)
297                 r = -ENOMEM;
298
299 out:
300         kfree(chunk_array);
301         return r;
302 }
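/*
 * Illustrative sketch (not part of the original file): the userspace side of
 * the chunk array that amdgpu_cs_parser_init() walks above. cs.in.chunks is a
 * user pointer to an array of u64 values, each of which is itself a user
 * pointer to a struct drm_amdgpu_cs_chunk. Assumes libdrm's amdgpu_drm.h and
 * xf86drm.h are on the include path; the helper name and parameters are
 * hypothetical.
 */
#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int submit_one_ib(int fd, uint32_t ctx_id, uint32_t bo_list_handle,
                         uint64_t ib_va, uint32_t ib_bytes, uint64_t *out_seq)
{
        struct drm_amdgpu_cs_chunk_ib ib_info;
        struct drm_amdgpu_cs_chunk chunk;
        uint64_t chunk_ptrs[1];
        union drm_amdgpu_cs cs;
        int r;

        memset(&ib_info, 0, sizeof(ib_info));
        ib_info.ip_type = AMDGPU_HW_IP_GFX;
        ib_info.ring = 0;
        ib_info.va_start = ib_va;       /* GPU VA of the mapped IB buffer */
        ib_info.ib_bytes = ib_bytes;

        chunk.chunk_id = AMDGPU_CHUNK_ID_IB;
        chunk.length_dw = sizeof(ib_info) / 4;
        chunk.chunk_data = (uintptr_t)&ib_info;

        /* array of pointers-to-chunks, exactly what the parser copies in */
        chunk_ptrs[0] = (uintptr_t)&chunk;

        memset(&cs, 0, sizeof(cs));
        cs.in.ctx_id = ctx_id;
        cs.in.bo_list_handle = bo_list_handle;
        cs.in.num_chunks = 1;
        cs.in.chunks = (uintptr_t)chunk_ptrs;

        r = drmIoctl(fd, DRM_IOCTL_AMDGPU_CS, &cs);
        if (r == 0)
                *out_seq = cs.out.handle;       /* fence sequence, usable for WAIT_CS */
        return r;
}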
303
304 /* Returns how many bytes TTM can move per IB.
305  */
306 static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
307 {
308         u64 real_vram_size = adev->mc.real_vram_size;
309         u64 vram_usage = atomic64_read(&adev->vram_usage);
310
311         /* This function is based on the current VRAM usage.
312          *
313          * - If all of VRAM is free, allow relocating the number of bytes that
314          *   is equal to 1/4 of the size of VRAM for this IB.
315
316          * - If more than one half of VRAM is occupied, only allow relocating
317          *   1 MB of data for this IB.
318          *
319          * - From 0 to one half of used VRAM, the threshold decreases
320          *   linearly.
321          *         __________________
322          * 1/4 of -|\               |
323          * VRAM    | \              |
324          *         |  \             |
325          *         |   \            |
326          *         |    \           |
327          *         |     \          |
328          *         |      \         |
329          *         |       \________|1 MB
330          *         |----------------|
331          *    VRAM 0 %             100 %
332          *         used            used
333          *
334          * Note: It's a threshold, not a limit. The threshold must be crossed
335          * for buffer relocations to stop, so any buffer of an arbitrary size
336          * can be moved as long as the threshold isn't crossed before
337          * the relocation takes place. We don't want to disable buffer
338          * relocations completely.
339          *
340          * The idea is that buffers should be placed in VRAM at creation time
341          * and TTM should only do a minimum number of relocations during
342          * command submission. In practice, you need to submit at least
343          * a dozen IBs to move all buffers to VRAM if they are in GTT.
344          *
345          * Also, things can get pretty crazy under memory pressure and actual
346          * VRAM usage can change a lot, so playing safe even at 50% does
347          * consistently increase performance.
348          */
349
350         u64 half_vram = real_vram_size >> 1;
351         u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
352         u64 bytes_moved_threshold = half_free_vram >> 1;
353         return max(bytes_moved_threshold, 1024*1024ull);
354 }
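/*
 * Worked example (not part of the original file) for the threshold above:
 * with 4 GiB of VRAM and 1 GiB in use, half_vram = 2 GiB,
 * half_free_vram = 2 GiB - 1 GiB = 1 GiB, so bytes_moved_threshold = 512 MiB.
 * Once usage passes half of VRAM, half_free_vram is 0 and the max() clamp
 * keeps at least 1 MiB of moves per IB. Below is a plain-C restatement for
 * experimenting with the numbers (hypothetical userspace helper).
 */
#include <stdint.h>

static uint64_t cs_threshold_for_moves(uint64_t real_vram_size,
                                       uint64_t vram_usage)
{
        uint64_t half_vram = real_vram_size >> 1;
        uint64_t half_free = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
        uint64_t threshold = half_free >> 1;
        uint64_t one_mb = 1024ull * 1024ull;

        return threshold > one_mb ? threshold : one_mb;
}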
355
356 int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
357 {
358         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
359         struct amdgpu_vm *vm = &fpriv->vm;
360         struct amdgpu_device *adev = p->adev;
361         struct amdgpu_bo_list_entry *lobj;
362         struct list_head duplicates;
363         struct amdgpu_bo *bo;
364         u64 bytes_moved = 0, initial_bytes_moved;
365         u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
366         int r;
367
368         INIT_LIST_HEAD(&duplicates);
369         r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
370         if (unlikely(r != 0)) {
371                 return r;
372         }
373
374         list_for_each_entry(lobj, &p->validated, tv.head) {
375                 bo = lobj->robj;
376                 if (!bo->pin_count) {
377                         u32 domain = lobj->prefered_domains;
378                         u32 current_domain =
379                                 amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
380
381                         /* Check if this buffer will be moved and don't move it
382                          * if we have moved too many buffers for this IB already.
383                          *
384                          * Note that this allows moving at least one buffer of
385                          * any size, because it doesn't take the current "bo"
386                          * into account. We don't want to disallow buffer moves
387                          * completely.
388                          */
389                         if (current_domain != AMDGPU_GEM_DOMAIN_CPU &&
390                             (domain & current_domain) == 0 && /* will be moved */
391                             bytes_moved > bytes_moved_threshold) {
392                                 /* don't move it */
393                                 domain = current_domain;
394                         }
395
396                 retry:
397                         amdgpu_ttm_placement_from_domain(bo, domain);
398                         initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
399                         r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
400                         bytes_moved += atomic64_read(&adev->num_bytes_moved) -
401                                        initial_bytes_moved;
402
403                         if (unlikely(r)) {
404                                 if (r != -ERESTARTSYS && domain != lobj->allowed_domains) {
405                                         domain = lobj->allowed_domains;
406                                         goto retry;
407                                 }
408                                 ttm_eu_backoff_reservation(&p->ticket, &p->validated);
409                                 return r;
410                         }
411                 }
412                 lobj->bo_va = amdgpu_vm_bo_find(vm, bo);
413         }
414         return 0;
415 }
416
417 static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
418 {
419         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
420         struct amdgpu_cs_buckets buckets;
421         bool need_mmap_lock = false;
422         int i, r;
423
424         if (p->bo_list) {
425                 need_mmap_lock = p->bo_list->has_userptr;
426                 amdgpu_cs_buckets_init(&buckets);
427                 for (i = 0; i < p->bo_list->num_entries; i++)
428                         amdgpu_cs_buckets_add(&buckets, &p->bo_list->array[i].tv.head,
429                                                                   p->bo_list->array[i].priority);
430
431                 amdgpu_cs_buckets_get_list(&buckets, &p->validated);
432         }
433
434         p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
435                                       &p->validated);
436
437         if (need_mmap_lock)
438                 down_read(&current->mm->mmap_sem);
439
440         r = amdgpu_cs_list_validate(p);
441
442         if (need_mmap_lock)
443                 up_read(&current->mm->mmap_sem);
444
445         return r;
446 }
447
448 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
449 {
450         struct amdgpu_bo_list_entry *e;
451         int r;
452
453         list_for_each_entry(e, &p->validated, tv.head) {
454                 struct reservation_object *resv = e->robj->tbo.resv;
455                 r = amdgpu_sync_resv(p->adev, &p->ibs[0].sync, resv, p->filp);
456
457                 if (r)
458                         return r;
459         }
460         return 0;
461 }
462
463 static int cmp_size_smaller_first(void *priv, struct list_head *a,
464                                   struct list_head *b)
465 {
466         struct amdgpu_bo_list_entry *la = list_entry(a, struct amdgpu_bo_list_entry, tv.head);
467         struct amdgpu_bo_list_entry *lb = list_entry(b, struct amdgpu_bo_list_entry, tv.head);
468
469         /* Sort A before B if A is smaller. */
470         return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
471 }
472
473 /**
474  * amdgpu_cs_parser_fini() - clean parser states
475  * @parser:     parser structure holding parsing context.
476  * @error:      error number
477  *
478  * If error is set, unvalidate the buffers; otherwise just free the memory
479  * used by the parsing context.
480  **/
481 static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
482 {
483         amdgpu_cs_parser_fini_early(parser, error, backoff);
484         amdgpu_cs_parser_fini_late(parser);
485 }
486
487 static int amdgpu_cs_parser_run_job(
488         struct amdgpu_cs_parser *sched_job)
489 {
490         amdgpu_cs_parser_fini_early(sched_job, 0, true);
491         return 0;
492 }
493
494 static int amdgpu_cs_parser_free_job(
495         struct amdgpu_cs_parser *sched_job)
496 {
497         amdgpu_cs_parser_fini_late(sched_job);
498         return 0;
499 }
500
501 static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
502 {
503         if (!error) {
504                 /* Sort the buffer list from the smallest to largest buffer,
505                  * which affects the order of buffers in the LRU list.
506                  * This assures that the smallest buffers are added first
507                  * to the LRU list, so they are likely to be later evicted
508                  * first, instead of large buffers whose eviction is more
509                  * expensive.
510                  *
511                  * This slightly lowers the number of bytes moved by TTM
512                  * per frame under memory pressure.
513                  */
514                 list_sort(NULL, &parser->validated, cmp_size_smaller_first);
515
516                 ttm_eu_fence_buffer_objects(&parser->ticket,
517                                 &parser->validated,
518                                 &parser->ibs[parser->num_ibs-1].fence->base);
519         } else if (backoff) {
520                 ttm_eu_backoff_reservation(&parser->ticket,
521                                            &parser->validated);
522         }
523 }
524
525 static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
526 {
527         unsigned i;
528         if (parser->ctx)
529                 amdgpu_ctx_put(parser->ctx);
530         if (parser->bo_list) {
531                 if (!parser->bo_list->has_userptr)
532                         amdgpu_bo_list_free(parser->bo_list);
533                 else
534                         amdgpu_bo_list_put(parser->bo_list);
535         }
536         drm_free_large(parser->vm_bos);
537         for (i = 0; i < parser->nchunks; i++)
538                 drm_free_large(parser->chunks[i].kdata);
539         kfree(parser->chunks);
540         if (parser->ibs)
541                 for (i = 0; i < parser->num_ibs; i++)
542                         amdgpu_ib_free(parser->adev, &parser->ibs[i]);
543         kfree(parser->ibs);
544         if (parser->uf.bo)
545                 drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
546
547         if (!amdgpu_enable_scheduler)
548                 kfree(parser);
549 }
550
551 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
552                                    struct amdgpu_vm *vm)
553 {
554         struct amdgpu_device *adev = p->adev;
555         struct amdgpu_bo_va *bo_va;
556         struct amdgpu_bo *bo;
557         int i, r;
558
559         r = amdgpu_vm_update_page_directory(adev, vm);
560         if (r)
561                 return r;
562
563         r = amdgpu_vm_clear_freed(adev, vm);
564         if (r)
565                 return r;
566
567         if (p->bo_list) {
568                 for (i = 0; i < p->bo_list->num_entries; i++) {
569                         struct fence *f;
570
571                         /* ignore duplicates */
572                         bo = p->bo_list->array[i].robj;
573                         if (!bo)
574                                 continue;
575
576                         bo_va = p->bo_list->array[i].bo_va;
577                         if (bo_va == NULL)
578                                 continue;
579
580                         r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
581                         if (r)
582                                 return r;
583
584                         f = &bo_va->last_pt_update->base;
585                         r = amdgpu_sync_fence(adev, &p->ibs[0].sync, f);
586                         if (r)
587                                 return r;
588                 }
589         }
590
591         return amdgpu_vm_clear_invalids(adev, vm, &p->ibs[0].sync);
592 }
593
594 static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
595                                  struct amdgpu_cs_parser *parser)
596 {
597         struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
598         struct amdgpu_vm *vm = &fpriv->vm;
599         struct amdgpu_ring *ring;
600         int i, r;
601
602         if (parser->num_ibs == 0)
603                 return 0;
604
605         /* Only for UVD/VCE VM emulation */
606         for (i = 0; i < parser->num_ibs; i++) {
607                 ring = parser->ibs[i].ring;
608                 if (ring->funcs->parse_cs) {
609                         r = amdgpu_ring_parse_cs(ring, parser, i);
610                         if (r)
611                                 return r;
612                 }
613         }
614
615         mutex_lock(&vm->mutex);
616         r = amdgpu_bo_vm_update_pte(parser, vm);
617         if (r) {
618                 goto out;
619         }
620         amdgpu_cs_sync_rings(parser);
621         if (!amdgpu_enable_scheduler)
622                 r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
623                                        parser->filp);
624
625 out:
626         mutex_unlock(&vm->mutex);
627         return r;
628 }
629
630 static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
631 {
632         if (r == -EDEADLK) {
633                 r = amdgpu_gpu_reset(adev);
634                 if (!r)
635                         r = -EAGAIN;
636         }
637         return r;
638 }
639
640 static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
641                              struct amdgpu_cs_parser *parser)
642 {
643         struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
644         struct amdgpu_vm *vm = &fpriv->vm;
645         int i, j;
646         int r;
647
648         for (i = 0, j = 0; i < parser->nchunks && j < parser->num_ibs; i++) {
649                 struct amdgpu_cs_chunk *chunk;
650                 struct amdgpu_ib *ib;
651                 struct drm_amdgpu_cs_chunk_ib *chunk_ib;
652                 struct amdgpu_ring *ring;
653
654                 chunk = &parser->chunks[i];
655                 ib = &parser->ibs[j];
656                 chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
657
658                 if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
659                         continue;
660
661                 r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
662                                        chunk_ib->ip_instance, chunk_ib->ring,
663                                        &ring);
664                 if (r)
665                         return r;
666
667                 if (ring->funcs->parse_cs) {
668                         struct amdgpu_bo_va_mapping *m;
669                         struct amdgpu_bo *aobj = NULL;
670                         uint64_t offset;
671                         uint8_t *kptr;
672
673                         m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
674                                                    &aobj);
675                         if (!aobj) {
676                                 DRM_ERROR("IB va_start is invalid\n");
677                                 return -EINVAL;
678                         }
679
680                         if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
681                             (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
682                                 DRM_ERROR("IB va_start+ib_bytes is invalid\n");
683                                 return -EINVAL;
684                         }
685
686                         /* the IB should be reserved at this point */
687                         r = amdgpu_bo_kmap(aobj, (void **)&kptr);
688                         if (r) {
689                                 return r;
690                         }
691
692                         offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
693                         kptr += chunk_ib->va_start - offset;
694
695                         r =  amdgpu_ib_get(ring, NULL, chunk_ib->ib_bytes, ib);
696                         if (r) {
697                                 DRM_ERROR("Failed to get ib !\n");
698                                 return r;
699                         }
700
701                         memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
702                         amdgpu_bo_kunmap(aobj);
703                 } else {
704                         r =  amdgpu_ib_get(ring, vm, 0, ib);
705                         if (r) {
706                                 DRM_ERROR("Failed to get ib !\n");
707                                 return r;
708                         }
709
710                         ib->gpu_addr = chunk_ib->va_start;
711                 }
712
713                 ib->length_dw = chunk_ib->ib_bytes / 4;
714                 ib->flags = chunk_ib->flags;
715                 ib->ctx = parser->ctx;
716                 j++;
717         }
718
719         if (!parser->num_ibs)
720                 return 0;
721
722         /* add GDS resources to first IB */
723         if (parser->bo_list) {
724                 struct amdgpu_bo *gds = parser->bo_list->gds_obj;
725                 struct amdgpu_bo *gws = parser->bo_list->gws_obj;
726                 struct amdgpu_bo *oa = parser->bo_list->oa_obj;
727                 struct amdgpu_ib *ib = &parser->ibs[0];
728
729                 if (gds) {
730                         ib->gds_base = amdgpu_bo_gpu_offset(gds);
731                         ib->gds_size = amdgpu_bo_size(gds);
732                 }
733                 if (gws) {
734                         ib->gws_base = amdgpu_bo_gpu_offset(gws);
735                         ib->gws_size = amdgpu_bo_size(gws);
736                 }
737                 if (oa) {
738                         ib->oa_base = amdgpu_bo_gpu_offset(oa);
739                         ib->oa_size = amdgpu_bo_size(oa);
740                 }
741         }
742
743         /* wrap the last IB with user fence */
744         if (parser->uf.bo) {
745                 struct amdgpu_ib *ib = &parser->ibs[parser->num_ibs - 1];
746
747                 /* UVD & VCE fw doesn't support user fences */
748                 if (ib->ring->type == AMDGPU_RING_TYPE_UVD ||
749                     ib->ring->type == AMDGPU_RING_TYPE_VCE)
750                         return -EINVAL;
751
752                 ib->user = &parser->uf;
753         }
754
755         return 0;
756 }
757
758 static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
759                                   struct amdgpu_cs_parser *p)
760 {
761         struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
762         struct amdgpu_ib *ib;
763         int i, j, r;
764
765         if (!p->num_ibs)
766                 return 0;
767
768         /* Add dependencies to first IB */
769         ib = &p->ibs[0];
770         for (i = 0; i < p->nchunks; ++i) {
771                 struct drm_amdgpu_cs_chunk_dep *deps;
772                 struct amdgpu_cs_chunk *chunk;
773                 unsigned num_deps;
774
775                 chunk = &p->chunks[i];
776
777                 if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
778                         continue;
779
780                 deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
781                 num_deps = chunk->length_dw * 4 /
782                         sizeof(struct drm_amdgpu_cs_chunk_dep);
783
784                 for (j = 0; j < num_deps; ++j) {
785                         struct amdgpu_ring *ring;
786                         struct amdgpu_ctx *ctx;
787                         struct fence *fence;
788
789                         r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
790                                                deps[j].ip_instance,
791                                                deps[j].ring, &ring);
792                         if (r)
793                                 return r;
794
795                         ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
796                         if (ctx == NULL)
797                                 return -EINVAL;
798
799                         fence = amdgpu_ctx_get_fence(ctx, ring,
800                                                      deps[j].handle);
801                         if (IS_ERR(fence)) {
802                                 r = PTR_ERR(fence);
803                                 amdgpu_ctx_put(ctx);
804                                 return r;
805
806                         } else if (fence) {
807                                 r = amdgpu_sync_fence(adev, &ib->sync, fence);
808                                 fence_put(fence);
809                                 amdgpu_ctx_put(ctx);
810                                 if (r)
811                                         return r;
812                         }
813                 }
814         }
815
816         return 0;
817 }
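/*
 * Illustrative sketch (not part of the original file): the userspace side of
 * the dependency chunk consumed above. A submission can be made to wait for a
 * fence returned by an earlier DRM_IOCTL_AMDGPU_CS by packing that fence's
 * context, ring and sequence number into a drm_amdgpu_cs_chunk_dep. Assumes
 * the uapi structures from amdgpu_drm.h; names are hypothetical.
 */
#include <stdint.h>
#include <amdgpu_drm.h>

static void fill_dep_chunk(struct drm_amdgpu_cs_chunk *chunk,
                           struct drm_amdgpu_cs_chunk_dep *dep,
                           uint32_t ctx_id, uint64_t prev_seq)
{
        dep->ip_type = AMDGPU_HW_IP_GFX;        /* ring the fence was emitted on */
        dep->ip_instance = 0;
        dep->ring = 0;
        dep->ctx_id = ctx_id;                   /* context of the earlier submission */
        dep->handle = prev_seq;                 /* cs.out.handle from that submission */

        chunk->chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
        chunk->length_dw = sizeof(*dep) / 4;
        chunk->chunk_data = (uintptr_t)dep;
}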
818
819 static int amdgpu_cs_parser_prepare_job(struct amdgpu_cs_parser *sched_job)
820 {
821         int r, i;
822         struct amdgpu_cs_parser *parser = sched_job;
823         struct amdgpu_device *adev = sched_job->adev;
824         bool reserved_buffers = false;
825
826         r = amdgpu_cs_parser_relocs(parser);
827         if (r) {
828                 if (r != -ERESTARTSYS) {
829                         if (r == -ENOMEM)
830                                 DRM_ERROR("Not enough memory for command submission!\n");
831                         else
832                                 DRM_ERROR("Failed to process the buffer list %d!\n", r);
833                 }
834         }
835
836         if (!r) {
837                 reserved_buffers = true;
838                 r = amdgpu_cs_ib_fill(adev, parser);
839         }
840         if (!r) {
841                 r = amdgpu_cs_dependencies(adev, parser);
842                 if (r)
843                         DRM_ERROR("Failed in the dependencies handling %d!\n", r);
844         }
845         if (r) {
846                 amdgpu_cs_parser_fini(parser, r, reserved_buffers);
847                 return r;
848         }
849
850         for (i = 0; i < parser->num_ibs; i++)
851                 trace_amdgpu_cs(parser, i);
852
853         r = amdgpu_cs_ib_vm_chunk(adev, parser);
854         return r;
855 }
856
857 static struct amdgpu_ring *amdgpu_cs_parser_get_ring(
858         struct amdgpu_device *adev,
859         struct amdgpu_cs_parser *parser)
860 {
861         int i, r;
862
863         struct amdgpu_cs_chunk *chunk;
864         struct drm_amdgpu_cs_chunk_ib *chunk_ib;
865         struct amdgpu_ring *ring = NULL;
866         for (i = 0; i < parser->nchunks; i++) {
867                 chunk = &parser->chunks[i];
868                 chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;
869
870                 if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
871                         continue;
872
873                 r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
874                                        chunk_ib->ip_instance, chunk_ib->ring,
875                                        &ring);
876                 if (r)
877                         return NULL;
878                 break;
879         }
880         return ring;
881 }
882
883 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
884 {
885         struct amdgpu_device *adev = dev->dev_private;
886         union drm_amdgpu_cs *cs = data;
887         struct amdgpu_cs_parser *parser;
888         int r;
889
890         down_read(&adev->exclusive_lock);
891         if (!adev->accel_working) {
892                 up_read(&adev->exclusive_lock);
893                 return -EBUSY;
894         }
895
896         parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
897         if (!parser)
898                 return -ENOMEM;
899         r = amdgpu_cs_parser_init(parser, data);
900         if (r) {
901                 DRM_ERROR("Failed to initialize parser !\n");
902                 amdgpu_cs_parser_fini(parser, r, false);
903                 up_read(&adev->exclusive_lock);
904                 r = amdgpu_cs_handle_lockup(adev, r);
905                 return r;
906         }
907
908         if (amdgpu_enable_scheduler && parser->num_ibs) {
909                 struct amdgpu_ring * ring =
910                         amdgpu_cs_parser_get_ring(adev, parser);
911                 parser->uf.sequence = atomic64_inc_return(
912                         &parser->ctx->rings[ring->idx].c_entity.last_queued_v_seq);
913                 if (ring->is_pte_ring || (parser->bo_list && parser->bo_list->has_userptr)) {
914                         r = amdgpu_cs_parser_prepare_job(parser);
915                         if (r)
916                                 goto out;
917                 } else
918                         parser->prepare_job = amdgpu_cs_parser_prepare_job;
919                 parser->ring = ring;
920                 parser->run_job = amdgpu_cs_parser_run_job;
921                 parser->free_job = amdgpu_cs_parser_free_job;
922                 amd_sched_push_job(ring->scheduler,
923                                    &parser->ctx->rings[ring->idx].c_entity,
924                                    parser);
925                 cs->out.handle = parser->uf.sequence;
926                 up_read(&adev->exclusive_lock);
927                 return 0;
928         }
929         r = amdgpu_cs_parser_prepare_job(parser);
930         if (r)
931                 goto out;
932
933         cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
934 out:
935         amdgpu_cs_parser_fini(parser, r, true);
936         up_read(&adev->exclusive_lock);
937         r = amdgpu_cs_handle_lockup(adev, r);
938         return r;
939 }
940
941 /**
942  * amdgpu_cs_wait_ioctl - wait for a command submission to finish
943  *
944  * @dev: drm device
945  * @data: data from userspace
946  * @filp: file private
947  *
948  * Wait for the command submission identified by handle to finish.
949  */
950 int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
951                          struct drm_file *filp)
952 {
953         union drm_amdgpu_wait_cs *wait = data;
954         struct amdgpu_device *adev = dev->dev_private;
955         unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
956         struct amdgpu_ring *ring = NULL;
957         struct amdgpu_ctx *ctx;
958         struct fence *fence;
959         long r;
960
961         r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
962                                wait->in.ring, &ring);
963         if (r)
964                 return r;
965
966         ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
967         if (ctx == NULL)
968                 return -EINVAL;
969
970         fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
971         if (IS_ERR(fence))
972                 r = PTR_ERR(fence);
973         else if (fence) {
974                 r = fence_wait_timeout(fence, true, timeout);
975                 fence_put(fence);
976         } else
977                 r = 1;
978
979         amdgpu_ctx_put(ctx);
980         if (r < 0)
981                 return r;
982
983         memset(wait, 0, sizeof(*wait));
984         wait->out.status = (r == 0);
985
986         return 0;
987 }
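/*
 * Illustrative sketch (not part of the original file): waiting on a
 * submission from userspace via the ioctl handled above. The handle is the
 * sequence number returned in cs.out.handle by DRM_IOCTL_AMDGPU_CS. Assumes
 * libdrm's xf86drm.h/amdgpu_drm.h; the helper name is hypothetical.
 */
#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int wait_for_submission(int fd, uint32_t ctx_id, uint64_t seq,
                               uint64_t timeout_ns)
{
        union drm_amdgpu_wait_cs wait;
        int r;

        memset(&wait, 0, sizeof(wait));
        wait.in.handle = seq;
        wait.in.timeout = timeout_ns;   /* or AMDGPU_TIMEOUT_INFINITE */
        wait.in.ip_type = AMDGPU_HW_IP_GFX;
        wait.in.ip_instance = 0;
        wait.in.ring = 0;
        wait.in.ctx_id = ctx_id;

        r = drmIoctl(fd, DRM_IOCTL_AMDGPU_WAIT_CS, &wait);
        if (r)
                return r;

        /* out.status is non-zero when the wait timed out before the fence signaled */
        return wait.out.status ? 1 : 0;
}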
988
989 /**
990  * amdgpu_cs_find_mapping - find the bo_va mapping for a VM address
991  *
992  * @parser: command submission parser context
993  * @addr: VM address
994  * @bo: resulting BO of the mapping found
995  *
996  * Search the buffer objects in the command submission context for a certain
997  * virtual memory address. Returns allocation structure when found, NULL
998  * otherwise.
999  */
1000 struct amdgpu_bo_va_mapping *
1001 amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
1002                        uint64_t addr, struct amdgpu_bo **bo)
1003 {
1004         struct amdgpu_bo_list_entry *reloc;
1005         struct amdgpu_bo_va_mapping *mapping;
1006
1007         addr /= AMDGPU_GPU_PAGE_SIZE;
1008
1009         list_for_each_entry(reloc, &parser->validated, tv.head) {
1010                 if (!reloc->bo_va)
1011                         continue;
1012
1013                 list_for_each_entry(mapping, &reloc->bo_va->mappings, list) {
1014                         if (mapping->it.start > addr ||
1015                             addr > mapping->it.last)
1016                                 continue;
1017
1018                         *bo = reloc->bo_va->bo;
1019                         return mapping;
1020                 }
1021         }
1022
1023         return NULL;
1024 }