/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"

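/**
 * amdgpu_cs_user_fence_chunk - grab the user fence BO referenced by a chunk
 * @p: parser context
 * @data: user fence chunk copied from userspace
 * @offset: returned byte offset of the fence location inside the BO
 *
 * Looks up the GEM handle from the chunk, takes a reference on the backing
 * BO and records it as the user fence entry of the parser. The BO must be
 * exactly one page and must not be a userptr BO. Returns 0 on success or a
 * negative error code.
 */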
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
                                      struct drm_amdgpu_cs_chunk_fence *data,
                                      uint32_t *offset)
{
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
        unsigned long size;
        int r;

        gobj = drm_gem_object_lookup(p->filp, data->handle);
        if (gobj == NULL)
                return -EINVAL;

        bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
        p->uf_entry.priority = 0;
        p->uf_entry.tv.bo = &bo->tbo;
        /* One for TTM and one for the CS job */
        p->uf_entry.tv.num_shared = 2;

        drm_gem_object_put_unlocked(gobj);

        size = amdgpu_bo_size(bo);
        if (size != PAGE_SIZE || (data->offset + 8) > size) {
                r = -EINVAL;
                goto error_unref;
        }

        if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
                r = -EINVAL;
                goto error_unref;
        }

        *offset = data->offset;

        return 0;

error_unref:
        amdgpu_bo_unref(&bo);
        return r;
}

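/**
 * amdgpu_cs_bo_handles_chunk - create a BO list from an inlined chunk
 * @p: parser context
 * @data: BO list description copied from userspace
 *
 * Builds the parser's bo_list from the handles embedded in an
 * AMDGPU_CHUNK_ID_BO_HANDLES chunk instead of a previously created
 * bo_list handle. Returns 0 on success or a negative error code.
 */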
static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
                                      struct drm_amdgpu_bo_list_in *data)
{
        struct drm_amdgpu_bo_list_entry *info = NULL;
        int r;

        r = amdgpu_bo_create_list_entry_array(data, &info);
        if (r)
                return r;

        r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
                                  &p->bo_list);
        if (r)
                goto error_free;

        kvfree(info);
        return 0;

error_free:
        /* kvfree() tolerates NULL, no need to check first */
        kvfree(info);

        return r;
}

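/**
 * amdgpu_cs_parser_init - copy in and validate the CS chunks
 * @p: parser context to initialize
 * @cs: CS ioctl argument from userspace
 *
 * Copies the chunk array from userspace, allocates kernel copies of each
 * chunk, counts the IBs and handles the fence and BO list chunks directly.
 * On success the context is referenced and its lock is held until
 * amdgpu_cs_parser_fini(). Returns 0 on success or a negative error code.
 */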
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        uint64_t *chunk_array_user;
        uint64_t *chunk_array;
        unsigned size, num_ibs = 0;
        uint32_t uf_offset = 0;
        int i;
        int ret;

        if (cs->in.num_chunks == 0)
                return 0;

        chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
        if (!chunk_array)
                return -ENOMEM;

        p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
        if (!p->ctx) {
                ret = -EINVAL;
                goto free_chunk;
        }

        mutex_lock(&p->ctx->lock);

        /* skip guilty context job */
        if (atomic_read(&p->ctx->guilty) == 1) {
                ret = -ECANCELED;
                goto free_chunk;
        }

        /* get chunks */
        chunk_array_user = u64_to_user_ptr(cs->in.chunks);
        if (copy_from_user(chunk_array, chunk_array_user,
                           sizeof(uint64_t)*cs->in.num_chunks)) {
                ret = -EFAULT;
                goto free_chunk;
        }

        p->nchunks = cs->in.num_chunks;
        p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
                            GFP_KERNEL);
        if (!p->chunks) {
                ret = -ENOMEM;
                goto free_chunk;
        }

        for (i = 0; i < p->nchunks; i++) {
                struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
                struct drm_amdgpu_cs_chunk user_chunk;
                uint32_t __user *cdata;

                chunk_ptr = u64_to_user_ptr(chunk_array[i]);
                if (copy_from_user(&user_chunk, chunk_ptr,
                                       sizeof(struct drm_amdgpu_cs_chunk))) {
                        ret = -EFAULT;
                        i--;
                        goto free_partial_kdata;
                }
                p->chunks[i].chunk_id = user_chunk.chunk_id;
                p->chunks[i].length_dw = user_chunk.length_dw;

                size = p->chunks[i].length_dw;
                cdata = u64_to_user_ptr(user_chunk.chunk_data);

                p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
                if (p->chunks[i].kdata == NULL) {
                        ret = -ENOMEM;
                        i--;
                        goto free_partial_kdata;
                }
                size *= sizeof(uint32_t);
                if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
                        ret = -EFAULT;
                        goto free_partial_kdata;
                }

                switch (p->chunks[i].chunk_id) {
                case AMDGPU_CHUNK_ID_IB:
                        ++num_ibs;
                        break;

                case AMDGPU_CHUNK_ID_FENCE:
                        size = sizeof(struct drm_amdgpu_cs_chunk_fence);
                        if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
                                ret = -EINVAL;
                                goto free_partial_kdata;
                        }

                        ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
                                                         &uf_offset);
                        if (ret)
                                goto free_partial_kdata;

                        break;

                case AMDGPU_CHUNK_ID_BO_HANDLES:
                        size = sizeof(struct drm_amdgpu_bo_list_in);
                        if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
                                ret = -EINVAL;
                                goto free_partial_kdata;
                        }

                        ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
                        if (ret)
                                goto free_partial_kdata;

                        break;

                case AMDGPU_CHUNK_ID_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
                case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
                        break;

                default:
                        ret = -EINVAL;
                        goto free_partial_kdata;
                }
        }

        ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
        if (ret)
                goto free_all_kdata;

        if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
                ret = -ECANCELED;
                goto free_all_kdata;
        }

        if (p->uf_entry.tv.bo)
                p->job->uf_addr = uf_offset;
        kfree(chunk_array);

        /* Use this opportunity to fill in task info for the vm */
        amdgpu_vm_set_task_info(vm);

        return 0;

free_all_kdata:
        i = p->nchunks - 1;
free_partial_kdata:
        for (; i >= 0; i--)
                kvfree(p->chunks[i].kdata);
        kfree(p->chunks);
        p->chunks = NULL;
        p->nchunks = 0;
free_chunk:
        kfree(chunk_array);

        return ret;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
        if (us <= 0 || !adev->mm_stats.log2_max_MBps)
                return 0;

        /* Since accum_us is incremented by a million per second, just
         * multiply it by the number of MB/s to get the number of bytes.
         */
        return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
        if (!adev->mm_stats.log2_max_MBps)
                return 0;

        return bytes >> adev->mm_stats.log2_max_MBps;
}

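/*
 * Example: with log2_max_MBps = 6 the throttle rate corresponds to
 * 2^6 = 64 MB/s. One second of accumulated time (1000000 us) then
 * converts to 1000000 << 6 = 64000000 bytes, i.e. 64 MB, matching the
 * comment above that accum_us is incremented by a million per second.
 */
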
/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
                                              u64 *max_bytes,
                                              u64 *max_vis_bytes)
{
        s64 time_us, increment_us;
        u64 free_vram, total_vram, used_vram;

        /* Allow a maximum of 200 accumulated ms. This is basically per-IB
         * throttling.
         *
         * It means that in order to get full max MBps, at least 5 IBs per
         * second must be submitted and not more than 200ms apart from each
         * other.
         */
        const s64 us_upper_bound = 200000;

        if (!adev->mm_stats.log2_max_MBps) {
                *max_bytes = 0;
                *max_vis_bytes = 0;
                return;
        }

        total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
        used_vram = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
        free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

        spin_lock(&adev->mm_stats.lock);

        /* Increase the amount of accumulated us. */
        time_us = ktime_to_us(ktime_get());
        increment_us = time_us - adev->mm_stats.last_update_us;
        adev->mm_stats.last_update_us = time_us;
        adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
                                      us_upper_bound);

        /* This prevents the short period of low performance when the VRAM
         * usage is low and the driver is in debt or doesn't have enough
         * accumulated us to fill VRAM quickly.
         *
         * The situation can occur in these cases:
         * - a lot of VRAM is freed by userspace
         * - the presence of a big buffer causes a lot of evictions
         *   (solution: split buffers into smaller ones)
         *
         * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
         * accum_us to a positive number.
         */
        if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
                s64 min_us;

                /* Be more aggressive on dGPUs. Try to fill a portion of free
                 * VRAM now.
                 */
                if (!(adev->flags & AMD_IS_APU))
                        min_us = bytes_to_us(adev, free_vram / 4);
                else
                        min_us = 0; /* Reset accum_us on APUs. */

                adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
        }

        /* This is set to 0 if the driver is in debt to disallow (optional)
         * buffer moves.
         */
        *max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

        /* Do the same for visible VRAM if half of it is free */
        if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
                u64 total_vis_vram = adev->gmc.visible_vram_size;
                u64 used_vis_vram =
                        amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);

                if (used_vis_vram < total_vis_vram) {
                        u64 free_vis_vram = total_vis_vram - used_vis_vram;
                        adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
                                                          increment_us, us_upper_bound);

                        if (free_vis_vram >= total_vis_vram / 2)
                                adev->mm_stats.accum_us_vis =
                                        max(bytes_to_us(adev, free_vis_vram / 2),
                                            adev->mm_stats.accum_us_vis);
                }

                *max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
        } else {
                *max_vis_bytes = 0;
        }

        spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
                                  u64 num_vis_bytes)
{
        spin_lock(&adev->mm_stats.lock);
        adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
        adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
        spin_unlock(&adev->mm_stats.lock);
}

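/**
 * amdgpu_cs_bo_validate - validate a single BO against its placement budget
 * @p: parser context
 * @bo: buffer object to validate
 *
 * Picks the preferred or merely allowed domains depending on how much of
 * the per-submission move budget is left, validates the BO there and
 * accounts the bytes TTM actually moved. Falls back to the allowed domains
 * on -ENOMEM. Returns 0 on success or a negative error code.
 */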
static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
                                 struct amdgpu_bo *bo)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
                .resv = bo->tbo.base.resv,
                .flags = 0
        };
        uint32_t domain;
        int r;

        if (bo->pin_count)
                return 0;

        /* Don't move this buffer if we have depleted our allowance
         * to move it. Don't move anything if the threshold is zero.
         */
        if (p->bytes_moved < p->bytes_moved_threshold) {
                if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
                    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
                        /* And don't move a CPU_ACCESS_REQUIRED BO to limited
                         * visible VRAM if we've depleted our allowance to do
                         * that.
                         */
                        if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
                                domain = bo->preferred_domains;
                        else
                                domain = bo->allowed_domains;
                } else {
                        domain = bo->preferred_domains;
                }
        } else {
                domain = bo->allowed_domains;
        }

retry:
        amdgpu_bo_placement_from_domain(bo, domain);
        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

        p->bytes_moved += ctx.bytes_moved;
        if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
            amdgpu_bo_in_cpu_visible_vram(bo))
                p->bytes_moved_vis += ctx.bytes_moved;

        if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
                domain = bo->allowed_domains;
                goto retry;
        }

        return r;
}

static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
{
        struct amdgpu_cs_parser *p = param;
        int r;

        r = amdgpu_cs_bo_validate(p, bo);
        if (r)
                return r;

        if (bo->shadow)
                r = amdgpu_cs_bo_validate(p, bo->shadow);

        return r;
}

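/**
 * amdgpu_cs_list_validate - validate all BOs on a reservation list
 * @p: parser context
 * @validated: list of amdgpu_bo_list_entry to validate
 *
 * Rejects userptr BOs that belong to a foreign process, rebinds userptr
 * BOs whose pages were invalidated and validates every entry through
 * amdgpu_cs_validate(). Returns 0 on success or a negative error code.
 */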
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                            struct list_head *validated)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct amdgpu_bo_list_entry *lobj;
        int r;

        list_for_each_entry(lobj, validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
                struct mm_struct *usermm;

                usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
                if (usermm && usermm != current->mm)
                        return -EPERM;

                if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
                    lobj->user_invalidated && lobj->user_pages) {
                        amdgpu_bo_placement_from_domain(bo,
                                                        AMDGPU_GEM_DOMAIN_CPU);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                        if (r)
                                return r;

                        amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
                                                     lobj->user_pages);
                }

                r = amdgpu_cs_validate(p, bo);
                if (r)
                        return r;

                kvfree(lobj->user_pages);
                lobj->user_pages = NULL;
        }
        return 0;
}

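/**
 * amdgpu_cs_parser_bos - reserve and validate all BOs of the submission
 * @p: parser context
 * @cs: CS ioctl argument from userspace
 *
 * Collects the BO list (from a handle, a BO_HANDLES chunk or an empty
 * default), grabs the userptr backing pages, reserves everything with a
 * ww_mutex ticket and validates the buffers within the current move
 * budget. Returns 0 on success or a negative error code.
 */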
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                                union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_list_entry *e;
        struct list_head duplicates;
        struct amdgpu_bo *gds;
        struct amdgpu_bo *gws;
        struct amdgpu_bo *oa;
        int r;

        INIT_LIST_HEAD(&p->validated);

        /* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
        if (cs->in.bo_list_handle) {
                if (p->bo_list)
                        return -EINVAL;

                r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
                                       &p->bo_list);
                if (r)
                        return r;
        } else if (!p->bo_list) {
                /* Create an empty bo_list when no handle is provided */
                r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
                                          &p->bo_list);
                if (r)
                        return r;
        }

        /* One for TTM and one for the CS job */
        amdgpu_bo_list_for_each_entry(e, p->bo_list)
                e->tv.num_shared = 2;

        amdgpu_bo_list_get_list(p->bo_list, &p->validated);

        INIT_LIST_HEAD(&duplicates);
        amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

        if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
                list_add(&p->uf_entry.tv.head, &p->validated);

        /* Get userptr backing pages. If pages are updated after being
         * registered in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate()
         * will do amdgpu_ttm_backend_bind() to flush and invalidate new pages
         */
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
                bool userpage_invalidated = false;
                int i;

                e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
                                        sizeof(struct page *),
                                        GFP_KERNEL | __GFP_ZERO);
                if (!e->user_pages) {
                        DRM_ERROR("kvmalloc_array failure\n");
                        return -ENOMEM;
                }

                r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
                if (r) {
                        kvfree(e->user_pages);
                        e->user_pages = NULL;
                        return r;
                }

                for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
                        if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
                                userpage_invalidated = true;
                                break;
                        }
                }
                e->user_invalidated = userpage_invalidated;
        }

        r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
                                   &duplicates);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
                goto out;
        }

        amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
                                          &p->bytes_moved_vis_threshold);
        p->bytes_moved = 0;
        p->bytes_moved_vis = 0;

        r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
                                      amdgpu_cs_validate, p);
        if (r) {
                DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
                goto error_validate;
        }

        r = amdgpu_cs_list_validate(p, &duplicates);
        if (r)
                goto error_validate;

        r = amdgpu_cs_list_validate(p, &p->validated);
        if (r)
                goto error_validate;

        amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
                                     p->bytes_moved_vis);

        gds = p->bo_list->gds_obj;
        gws = p->bo_list->gws_obj;
        oa = p->bo_list->oa_obj;

        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                /* Make sure we use the exclusive slot for shared BOs */
                if (bo->prime_shared_count)
                        e->tv.num_shared = 0;
                e->bo_va = amdgpu_vm_bo_find(vm, bo);
        }

        if (gds) {
                p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
                p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
        }
        if (gws) {
                p->job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
                p->job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
        }
        if (oa) {
                p->job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
                p->job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
        }

        if (!r && p->uf_entry.tv.bo) {
                struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

                r = amdgpu_ttm_alloc_gart(&uf->tbo);
                p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
        }

error_validate:
        if (r)
                ttm_eu_backoff_reservation(&p->ticket, &p->validated);
out:
        return r;
}

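/**
 * amdgpu_cs_sync_rings - add reservation object fences to the job's sync
 * @p: parser context
 *
 * Makes the job wait for the fences of every validated BO, honoring
 * per-BO explicit sync. Returns 0 on success or a negative error code.
 */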
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
        struct amdgpu_bo_list_entry *e;
        int r;

        list_for_each_entry(e, &p->validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
                struct dma_resv *resv = bo->tbo.base.resv;

                r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
                                     amdgpu_bo_explicit_sync(bo));

                if (r)
                        return r;
        }
        return 0;
}

/**
 * amdgpu_cs_parser_fini() - clean parser states
 * @parser:     parser structure holding parsing context.
 * @error:      error number
 * @backoff:    indicator to backoff the reservation
 *
 * If error is set, then unvalidate the buffers; otherwise just free the
 * memory used by the parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
                                  bool backoff)
{
        unsigned i;

        if (error && backoff)
                ttm_eu_backoff_reservation(&parser->ticket,
                                           &parser->validated);

        for (i = 0; i < parser->num_post_deps; i++) {
                drm_syncobj_put(parser->post_deps[i].syncobj);
                kfree(parser->post_deps[i].chain);
        }
        kfree(parser->post_deps);

        dma_fence_put(parser->fence);

        if (parser->ctx) {
                mutex_unlock(&parser->ctx->lock);
                amdgpu_ctx_put(parser->ctx);
        }
        if (parser->bo_list)
                amdgpu_bo_list_put(parser->bo_list);

        for (i = 0; i < parser->nchunks; i++)
                kvfree(parser->chunks[i].kdata);
        kfree(parser->chunks);
        if (parser->job)
                amdgpu_job_free(parser->job);
        if (parser->uf_entry.tv.bo) {
                struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

                amdgpu_bo_unref(&uf);
        }
}

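/**
 * amdgpu_cs_vm_handling - update the VM and fix up IBs before submission
 * @p: parser context
 *
 * Performs the UVD/VCE VM emulation by parsing or patching the IBs in
 * place, then updates the page tables: freed and moved mappings, the PRT
 * and CSA VAs and every BO VA of the submission, syncing the job to the
 * resulting page table updates. Returns 0 on success or a negative error
 * code.
 */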
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
        struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_device *adev = p->adev;
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_list_entry *e;
        struct amdgpu_bo_va *bo_va;
        struct amdgpu_bo *bo;
        int r;

        /* Only for UVD/VCE VM emulation */
        if (ring->funcs->parse_cs || ring->funcs->patch_cs_in_place) {
                unsigned i, j;

                for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
                        struct drm_amdgpu_cs_chunk_ib *chunk_ib;
                        struct amdgpu_bo_va_mapping *m;
                        struct amdgpu_bo *aobj = NULL;
                        struct amdgpu_cs_chunk *chunk;
                        uint64_t offset, va_start;
                        struct amdgpu_ib *ib;
                        uint8_t *kptr;

                        chunk = &p->chunks[i];
                        ib = &p->job->ibs[j];
                        chunk_ib = chunk->kdata;

                        if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
                                continue;

                        va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
                        r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
                        if (r) {
                                DRM_ERROR("IB va_start is invalid\n");
                                return r;
                        }

                        if ((va_start + chunk_ib->ib_bytes) >
                            (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
                                DRM_ERROR("IB va_start+ib_bytes is invalid\n");
                                return -EINVAL;
                        }

                        /* the IB should be reserved at this point */
                        r = amdgpu_bo_kmap(aobj, (void **)&kptr);
                        if (r)
                                return r;

                        offset = m->start * AMDGPU_GPU_PAGE_SIZE;
                        kptr += va_start - offset;

                        if (ring->funcs->parse_cs) {
                                memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
                                amdgpu_bo_kunmap(aobj);

                                r = amdgpu_ring_parse_cs(ring, p, j);
                                if (r)
                                        return r;
                        } else {
                                ib->ptr = (uint32_t *)kptr;
                                r = amdgpu_ring_patch_cs_in_place(ring, p, j);
                                amdgpu_bo_kunmap(aobj);
                                if (r)
                                        return r;
                        }

                        j++;
                }
        }

        if (!p->job->vm)
                return amdgpu_cs_sync_rings(p);

        r = amdgpu_vm_clear_freed(adev, vm, NULL);
        if (r)
                return r;

        r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
        if (r)
                return r;

        r = amdgpu_sync_vm_fence(&p->job->sync, fpriv->prt_va->last_pt_update);
        if (r)
                return r;

        if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
                bo_va = fpriv->csa_va;
                BUG_ON(!bo_va);
                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;

                r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
                if (r)
                        return r;
        }

        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                /* ignore duplicates */
                bo = ttm_to_amdgpu_bo(e->tv.bo);
                if (!bo)
                        continue;

                bo_va = e->bo_va;
                if (bo_va == NULL)
                        continue;

                r = amdgpu_vm_bo_update(adev, bo_va, false);
                if (r)
                        return r;

                r = amdgpu_sync_vm_fence(&p->job->sync, bo_va->last_pt_update);
                if (r)
                        return r;
        }

        r = amdgpu_vm_handle_moved(adev, vm);
        if (r)
                return r;

        r = amdgpu_vm_update_pdes(adev, vm, false);
        if (r)
                return r;

        r = amdgpu_sync_vm_fence(&p->job->sync, vm->last_update);
        if (r)
                return r;

        p->job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.base.bo);

        if (amdgpu_vm_debug) {
                /* Invalidate all BOs to test for userspace bugs */
                amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                        struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                        /* ignore duplicates */
                        if (!bo)
                                continue;

                        amdgpu_vm_bo_invalidate(adev, bo, false);
                }
        }

        return amdgpu_cs_sync_rings(p);
}

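/**
 * amdgpu_cs_ib_fill - fill the job's IB array from the IB chunks
 * @adev: amdgpu device
 * @parser: parser context
 *
 * Resolves the scheduler entity for each IB chunk, enforces that all IBs
 * of one submission target the same entity, limits preemptible CE/DE IBs
 * to one each and allocates the IB structures. Returns 0 on success or a
 * negative error code.
 */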
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
                             struct amdgpu_cs_parser *parser)
{
        struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
        int r, ce_preempt = 0, de_preempt = 0;
        struct amdgpu_ring *ring;
        int i, j;

        for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
                struct amdgpu_cs_chunk *chunk;
                struct amdgpu_ib *ib;
                struct drm_amdgpu_cs_chunk_ib *chunk_ib;
                struct drm_sched_entity *entity;

                chunk = &parser->chunks[i];
                ib = &parser->job->ibs[j];
                chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

                if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
                        continue;

                if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
                    (amdgpu_mcbp || amdgpu_sriov_vf(adev))) {
                        if (chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
                                if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
                                        ce_preempt++;
                                else
                                        de_preempt++;
                        }

                        /* each GFX command submit allows 0 or 1 IB preemptible for CE & DE */
                        if (ce_preempt > 1 || de_preempt > 1)
                                return -EINVAL;
                }

                r = amdgpu_ctx_get_entity(parser->ctx, chunk_ib->ip_type,
                                          chunk_ib->ip_instance, chunk_ib->ring,
                                          &entity);
                if (r)
                        return r;

                if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
                        parser->job->preamble_status |=
                                AMDGPU_PREAMBLE_IB_PRESENT;

                if (parser->entity && parser->entity != entity)
                        return -EINVAL;

                parser->entity = entity;

                ring = to_amdgpu_ring(entity->rq->sched);
                r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
                                  chunk_ib->ib_bytes : 0, ib);
                if (r) {
                        DRM_ERROR("Failed to get ib!\n");
                        return r;
                }

                ib->gpu_addr = chunk_ib->va_start;
                ib->length_dw = chunk_ib->ib_bytes / 4;
                ib->flags = chunk_ib->flags;

                j++;
        }

        /* MM engine doesn't support user fences */
        ring = to_amdgpu_ring(parser->entity->rq->sched);
        if (parser->job->uf_addr && ring->funcs->no_user_fence)
                return -EINVAL;

        return amdgpu_ctx_wait_prev_fence(parser->ctx, parser->entity);
}

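/**
 * amdgpu_cs_process_fence_dep - add context fence dependencies to the job
 * @p: parser context
 * @chunk: dependency chunk to process
 *
 * Looks up each dependency fence by context, entity and sequence number.
 * For SCHEDULED_DEPENDENCIES the scheduled fence is used instead of the
 * finished fence, so the job only waits for the dependency to start.
 * Returns 0 on success or a negative error code.
 */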
static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
                                       struct amdgpu_cs_chunk *chunk)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        unsigned num_deps;
        int i, r;
        struct drm_amdgpu_cs_chunk_dep *deps;

        deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_dep);

        for (i = 0; i < num_deps; ++i) {
                struct amdgpu_ctx *ctx;
                struct drm_sched_entity *entity;
                struct dma_fence *fence;

                ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
                if (ctx == NULL)
                        return -EINVAL;

                r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
                                          deps[i].ip_instance,
                                          deps[i].ring, &entity);
                if (r) {
                        amdgpu_ctx_put(ctx);
                        return r;
                }

                fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
                amdgpu_ctx_put(ctx);

                if (IS_ERR(fence))
                        return PTR_ERR(fence);
                else if (!fence)
                        continue;

                if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
                        struct drm_sched_fence *s_fence;
                        struct dma_fence *old = fence;

                        s_fence = to_drm_sched_fence(fence);
                        fence = dma_fence_get(&s_fence->scheduled);
                        dma_fence_put(old);
                }

                r = amdgpu_sync_fence(&p->job->sync, fence, true);
                dma_fence_put(fence);
                if (r)
                        return r;
        }
        return 0;
}

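/**
 * amdgpu_syncobj_lookup_and_add_to_sync - wait on a syncobj fence
 * @p: parser context
 * @handle: syncobj handle to look up
 * @point: timeline point, 0 for binary syncobjs
 * @flags: DRM_SYNCOBJ_* lookup flags
 *
 * Finds the fence behind a syncobj (optionally at a timeline point) and
 * adds it as a dependency of the job. Returns 0 on success or a negative
 * error code.
 */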
static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
                                                 uint32_t handle, u64 point,
                                                 u64 flags)
{
        struct dma_fence *fence;
        int r;

        r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
        if (r) {
                DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
                          handle, point, r);
                return r;
        }

        r = amdgpu_sync_fence(&p->job->sync, fence, true);
        dma_fence_put(fence);

        return r;
}

static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
                                            struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_sem *deps;
        unsigned num_deps;
        int i, r;

        deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_sem);
        for (i = 0; i < num_deps; ++i) {
                r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
                                                          0, 0);
                if (r)
                        return r;
        }

        return 0;
}

static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
                                                     struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
        unsigned num_deps;
        int i, r;

        syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_syncobj);
        for (i = 0; i < num_deps; ++i) {
                r = amdgpu_syncobj_lookup_and_add_to_sync(p,
                                                          syncobj_deps[i].handle,
                                                          syncobj_deps[i].point,
                                                          syncobj_deps[i].flags);
                if (r)
                        return r;
        }

        return 0;
}

static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
                                             struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_sem *deps;
        unsigned num_deps;
        int i;

        deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_sem);

        if (p->post_deps)
                return -EINVAL;

        p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
                                     GFP_KERNEL);
        p->num_post_deps = 0;

        if (!p->post_deps)
                return -ENOMEM;

        for (i = 0; i < num_deps; ++i) {
                p->post_deps[i].syncobj =
                        drm_syncobj_find(p->filp, deps[i].handle);
                if (!p->post_deps[i].syncobj)
                        return -EINVAL;
                p->post_deps[i].chain = NULL;
                p->post_deps[i].point = 0;
                p->num_post_deps++;
        }

        return 0;
}

static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
                                                      struct amdgpu_cs_chunk *chunk)
{
        struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
        unsigned num_deps;
        int i;

        syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
        num_deps = chunk->length_dw * 4 /
                sizeof(struct drm_amdgpu_cs_chunk_syncobj);

        if (p->post_deps)
                return -EINVAL;

        p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
                                     GFP_KERNEL);
        p->num_post_deps = 0;

        if (!p->post_deps)
                return -ENOMEM;

        for (i = 0; i < num_deps; ++i) {
                struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

                dep->chain = NULL;
                if (syncobj_deps[i].point) {
                        dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
                        if (!dep->chain)
                                return -ENOMEM;
                }

                dep->syncobj = drm_syncobj_find(p->filp,
                                                syncobj_deps[i].handle);
                if (!dep->syncobj) {
                        kfree(dep->chain);
                        return -EINVAL;
                }
                dep->point = syncobj_deps[i].point;
                p->num_post_deps++;
        }

        return 0;
}

static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
                                  struct amdgpu_cs_parser *p)
{
        int i, r;

        for (i = 0; i < p->nchunks; ++i) {
                struct amdgpu_cs_chunk *chunk;

                chunk = &p->chunks[i];

                switch (chunk->chunk_id) {
                case AMDGPU_CHUNK_ID_DEPENDENCIES:
                case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
                        r = amdgpu_cs_process_fence_dep(p, chunk);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
                        r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
                        r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
                        r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
                        if (r)
                                return r;
                        break;
                case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
                        r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
                        if (r)
                                return r;
                        break;
                }
        }

        return 0;
}

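/**
 * amdgpu_cs_post_dependencies - signal the out-syncobjs of the submission
 * @p: parser context
 *
 * Attaches the CS fence to every syncobj collected from SYNCOBJ_OUT and
 * SYNCOBJ_TIMELINE_SIGNAL chunks, either as a new timeline point or by
 * replacing the syncobj's fence.
 */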
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
        int i;

        for (i = 0; i < p->num_post_deps; ++i) {
                if (p->post_deps[i].chain && p->post_deps[i].point) {
                        drm_syncobj_add_point(p->post_deps[i].syncobj,
                                              p->post_deps[i].chain,
                                              p->fence, p->post_deps[i].point);
                        p->post_deps[i].chain = NULL;
                } else {
                        drm_syncobj_replace_fence(p->post_deps[i].syncobj,
                                                  p->fence);
                }
        }
}

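/**
 * amdgpu_cs_submit - push the prepared job to the scheduler
 * @p: parser context
 * @cs: CS ioctl argument, out.handle receives the fence sequence number
 *
 * Initializes the scheduler job, re-checks the userptr pages under the
 * notifier lock (returning -EAGAIN so userspace restarts the ioctl when
 * they changed), publishes the fence to the context and the out-syncobjs
 * and pushes the job to the entity. Returns 0 on success or a negative
 * error code.
 */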
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                            union drm_amdgpu_cs *cs)
{
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct drm_sched_entity *entity = p->entity;
        enum drm_sched_priority priority;
        struct amdgpu_ring *ring;
        struct amdgpu_bo_list_entry *e;
        struct amdgpu_job *job;
        uint64_t seq;
        int r;

        job = p->job;
        p->job = NULL;

        r = drm_sched_job_init(&job->base, entity, p->filp);
        if (r)
                goto error_unlock;

        /* No memory allocation is allowed while holding the notifier lock.
         * The lock is held until amdgpu_cs_submit is finished and the fence
         * is added to the BOs.
         */
        mutex_lock(&p->adev->notifier_lock);

        /* If the userptr pages were invalidated after amdgpu_cs_parser_bos(),
         * return -EAGAIN so that drmIoctl in libdrm restarts the
         * amdgpu_cs_ioctl.
         */
        amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

                r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
        }
        if (r) {
                r = -EAGAIN;
                goto error_abort;
        }

        job->owner = p->filp;
        p->fence = dma_fence_get(&job->base.s_fence->finished);

        amdgpu_ctx_add_fence(p->ctx, entity, p->fence, &seq);
        amdgpu_cs_post_dependencies(p);

        if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
            !p->ctx->preamble_presented) {
                job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
                p->ctx->preamble_presented = true;
        }

        cs->out.handle = seq;
        job->uf_sequence = seq;

        amdgpu_job_free_resources(job);

        trace_amdgpu_cs_ioctl(job);
        amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
        priority = job->base.s_priority;
        drm_sched_entity_push_job(&job->base, entity);

        ring = to_amdgpu_ring(entity->rq->sched);
        amdgpu_ring_priority_get(ring, priority);

        amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

        ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
        mutex_unlock(&p->adev->notifier_lock);

        return 0;

error_abort:
        drm_sched_job_cleanup(&job->base);
        mutex_unlock(&p->adev->notifier_lock);

error_unlock:
        amdgpu_job_free(job);
        return r;
}

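/**
 * amdgpu_cs_ioctl - parse and submit a command stream
 * @dev: drm device
 * @data: union drm_amdgpu_cs from userspace
 * @filp: file private
 *
 * Top level entry point of a command submission: initializes the parser,
 * fills the IBs, resolves dependencies, reserves and validates the BOs,
 * handles the VM updates and finally submits the job.
 */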
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_cs *cs = data;
        struct amdgpu_cs_parser parser = {};
        bool reserved_buffers = false;
        int i, r;

        if (amdgpu_ras_intr_triggered())
                return -EHWPOISON;

        if (!adev->accel_working)
                return -EBUSY;

        parser.adev = adev;
        parser.filp = filp;

        r = amdgpu_cs_parser_init(&parser, data);
        if (r) {
                DRM_ERROR("Failed to initialize parser %d!\n", r);
                goto out;
        }

        r = amdgpu_cs_ib_fill(adev, &parser);
        if (r)
                goto out;

        r = amdgpu_cs_dependencies(adev, &parser);
        if (r) {
                DRM_ERROR("Failed in the dependencies handling %d!\n", r);
                goto out;
        }

        r = amdgpu_cs_parser_bos(&parser, data);
        if (r) {
                if (r == -ENOMEM)
                        DRM_ERROR("Not enough memory for command submission!\n");
                else if (r != -ERESTARTSYS && r != -EAGAIN)
                        DRM_ERROR("Failed to process the buffer list %d!\n", r);
                goto out;
        }

        reserved_buffers = true;

        for (i = 0; i < parser.job->num_ibs; i++)
                trace_amdgpu_cs(&parser, i);

        r = amdgpu_cs_vm_handling(&parser);
        if (r)
                goto out;

        r = amdgpu_cs_submit(&parser, cs);

out:
        amdgpu_cs_parser_fini(&parser, r, reserved_buffers);

        return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *filp)
{
        union drm_amdgpu_wait_cs *wait = data;
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
        struct drm_sched_entity *entity;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;
        long r;

        ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
        if (ctx == NULL)
                return -EINVAL;

        r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
                                  wait->in.ring, &entity);
        if (r) {
                amdgpu_ctx_put(ctx);
                return r;
        }

        fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
        if (IS_ERR(fence))
                r = PTR_ERR(fence);
        else if (fence) {
                r = dma_fence_wait_timeout(fence, true, timeout);
                if (r > 0 && fence->error)
                        r = fence->error;
                dma_fence_put(fence);
        } else {
                r = 1;
        }

        amdgpu_ctx_put(ctx);
        if (r < 0)
                return r;

        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r == 0);

        return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
                                             struct drm_file *filp,
                                             struct drm_amdgpu_fence *user)
{
        struct drm_sched_entity *entity;
        struct amdgpu_ctx *ctx;
        struct dma_fence *fence;
        int r;

        ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
        if (ctx == NULL)
                return ERR_PTR(-EINVAL);

        r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
                                  user->ring, &entity);
        if (r) {
                amdgpu_ctx_put(ctx);
                return ERR_PTR(r);
        }

        fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
        amdgpu_ctx_put(ctx);

        return fence;
}

int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_fence_to_handle *info = data;
        struct dma_fence *fence;
        struct drm_syncobj *syncobj;
        struct sync_file *sync_file;
        int fd, r;

        fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        if (!fence)
                fence = dma_fence_get_stub();

        switch (info->in.what) {
        case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
                r = drm_syncobj_create(&syncobj, 0, fence);
                dma_fence_put(fence);
                if (r)
                        return r;
                r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
                drm_syncobj_put(syncobj);
                return r;

        case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
                r = drm_syncobj_create(&syncobj, 0, fence);
                dma_fence_put(fence);
                if (r)
                        return r;
                r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
                drm_syncobj_put(syncobj);
                return r;

        case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
                fd = get_unused_fd_flags(O_CLOEXEC);
                if (fd < 0) {
                        dma_fence_put(fence);
                        return fd;
                }

                sync_file = sync_file_create(fence);
                dma_fence_put(fence);
                if (!sync_file) {
                        put_unused_fd(fd);
                        return -ENOMEM;
                }

                fd_install(fd, sync_file->file);
                info->out.handle = fd;
                return 0;

        default:
                return -EINVAL;
        }
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
                                     struct drm_file *filp,
                                     union drm_amdgpu_wait_fences *wait,
                                     struct drm_amdgpu_fence *fences)
{
        uint32_t fence_count = wait->in.fence_count;
        unsigned int i;
        long r = 1;

        for (i = 0; i < fence_count; i++) {
                struct dma_fence *fence;
                unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

                fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
                if (IS_ERR(fence))
                        return PTR_ERR(fence);
                else if (!fence)
                        continue;

                r = dma_fence_wait_timeout(fence, true, timeout);
                /* Pick up the fence error before dropping our reference */
                if (r > 0 && fence->error)
                        r = fence->error;

                dma_fence_put(fence);
                if (r < 0)
                        return r;

                if (r == 0)
                        break;
        }

        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r > 0);

        return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
                                    struct drm_file *filp,
                                    union drm_amdgpu_wait_fences *wait,
                                    struct drm_amdgpu_fence *fences)
{
        unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
        uint32_t fence_count = wait->in.fence_count;
        uint32_t first = ~0;
        struct dma_fence **array;
        unsigned int i;
        long r;

        /* Prepare the fence array */
        array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

        if (array == NULL)
                return -ENOMEM;

        for (i = 0; i < fence_count; i++) {
                struct dma_fence *fence;

                fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
                if (IS_ERR(fence)) {
                        r = PTR_ERR(fence);
                        goto err_free_fence_array;
                } else if (fence) {
                        array[i] = fence;
                } else { /* NULL, the fence has been already signaled */
                        r = 1;
                        first = i;
                        goto out;
                }
        }

        r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
                                       &first);
        if (r < 0)
                goto err_free_fence_array;

out:
        memset(wait, 0, sizeof(*wait));
        wait->out.status = (r > 0);
        wait->out.first_signaled = first;

        if (first < fence_count && array[first])
                r = array[first]->error;
        else
                r = 0;

err_free_fence_array:
        for (i = 0; i < fence_count; i++)
                dma_fence_put(array[i]);
        kfree(array);

        return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *filp)
{
        struct amdgpu_device *adev = dev->dev_private;
        union drm_amdgpu_wait_fences *wait = data;
        uint32_t fence_count = wait->in.fence_count;
        struct drm_amdgpu_fence *fences_user;
        struct drm_amdgpu_fence *fences;
        int r;

        /* Get the fences from userspace */
        fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
                        GFP_KERNEL);
        if (fences == NULL)
                return -ENOMEM;

        fences_user = u64_to_user_ptr(wait->in.fences);
        if (copy_from_user(fences, fences_user,
                sizeof(struct drm_amdgpu_fence) * fence_count)) {
                r = -EFAULT;
                goto err_free_fences;
        }

        if (wait->in.wait_all)
                r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
        else
                r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
        kfree(fences);

        return r;
}

/**
 * amdgpu_cs_find_mapping - find the BO VA mapping for a VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: resulting mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns 0 and fills @bo and @map when a mapping is
 * found, -EINVAL otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                           uint64_t addr, struct amdgpu_bo **bo,
                           struct amdgpu_bo_va_mapping **map)
{
        struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
        struct ttm_operation_ctx ctx = { false, false };
        struct amdgpu_vm *vm = &fpriv->vm;
        struct amdgpu_bo_va_mapping *mapping;
        int r;

        addr /= AMDGPU_GPU_PAGE_SIZE;

        mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
        if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
                return -EINVAL;

        *bo = mapping->bo_va->base.bo;
        *map = mapping;

        /* Double check that the BO is reserved by this CS */
        if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
                return -EINVAL;

        if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
                (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
                amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
                r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
                if (r)
                        return r;
        }

        return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}