drivers/gpu/drm/nouveau/nouveau_gem.c
/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drm_gem_ttm_helper.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>
#include <nvif/push206e.h>

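/*
 * GEM object free callback: tears down any dma-buf import state and
 * drops the underlying TTM BO reference, holding a runtime PM reference
 * so the device stays awake while the BO is destroyed.
 */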
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct device *dev = drm->dev->dev;
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (WARN_ON(ret < 0 && ret != -EACCES)) {
                pm_runtime_put_autosuspend(dev);
                return;
        }

        if (gem->import_attach)
                drm_prime_gem_destroy(gem, nvbo->bo.sg);

        ttm_bo_put(&nvbo->bo);

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
}

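/*
 * GEM object open callback: called when a client first obtains a handle
 * to this object.  On NV50 and newer, create (or take a reference on)
 * a VMA mapping the BO into the client's (or SVM) address space.
 */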
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct device *dev = drm->dev->dev;
        struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
        struct nouveau_vma *vma;
        int ret;

        if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
                return 0;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0 && ret != -EACCES) {
                pm_runtime_put_autosuspend(dev);
                goto out;
        }

        ret = nouveau_vma_new(nvbo, vmm, &vma);
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
out:
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

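/*
 * Deferred VMA teardown: when a mapping is still fenced at close time,
 * its destruction is queued as client work that runs once the fence
 * signals.
 */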
struct nouveau_gem_object_unmap {
        struct nouveau_cli_work work;
        struct nouveau_vma *vma;
};

static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
        nouveau_fence_unref(&vma->fence);
        nouveau_vma_del(&vma);
}

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
        struct nouveau_gem_object_unmap *work =
                container_of(w, typeof(*work), work);
        nouveau_gem_object_delete(work->vma);
        kfree(work);
}

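/*
 * Unmap a VMA: delete it immediately if it is idle, otherwise queue
 * deferred deletion behind its fence.  If the work allocation fails,
 * fall back to a bounded synchronous wait before deleting.
 */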
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
        struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
        struct nouveau_gem_object_unmap *work;

        list_del_init(&vma->head);

        if (!fence) {
                nouveau_gem_object_delete(vma);
                return;
        }

        if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
                WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
                nouveau_gem_object_delete(vma);
                return;
        }

        work->work.func = nouveau_gem_object_delete_work;
        work->vma = vma;
        nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}

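/*
 * GEM object close callback: drop the client's reference on its VMA
 * for this BO and unmap it once the last reference is gone.
 */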
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct device *dev = drm->dev->dev;
        struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
        struct nouveau_vma *vma;
        int ret;

        if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
                return;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return;

        vma = nouveau_vma_find(nvbo, vmm);
        if (vma) {
                if (--vma->refs == 0) {
                        ret = pm_runtime_get_sync(dev);
                        if (!WARN_ON(ret < 0 && ret != -EACCES)) {
                                nouveau_gem_object_unmap(nvbo, vma);
                                pm_runtime_mark_last_busy(dev);
                        }
                        pm_runtime_put_autosuspend(dev);
                }
        }
        ttm_bo_unreserve(&nvbo->bo);
}

const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
        .free = nouveau_gem_object_del,
        .open = nouveau_gem_object_open,
        .close = nouveau_gem_object_close,
        .pin = nouveau_gem_prime_pin,
        .unpin = nouveau_gem_prime_unpin,
        .get_sg_table = nouveau_gem_prime_get_sg_table,
        .vmap = drm_gem_ttm_vmap,
        .vunmap = drm_gem_ttm_vunmap,
};

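/*
 * Allocate a new nouveau_bo with an embedded GEM object.  The single
 * reference handed back to the caller is the GEM reference; the BO is
 * destroyed via the GEM free callback above.
 */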
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
                uint32_t tile_mode, uint32_t tile_flags,
                struct nouveau_bo **pnvbo)
{
        struct nouveau_drm *drm = cli->drm;
        struct nouveau_bo *nvbo;
        int ret;

        if (!(domain & (NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)))
                domain |= NOUVEAU_GEM_DOMAIN_CPU;

        nvbo = nouveau_bo_alloc(cli, &size, &align, domain, tile_mode,
                                tile_flags);
        if (IS_ERR(nvbo))
                return PTR_ERR(nvbo);

        nvbo->bo.base.funcs = &nouveau_gem_object_funcs;

        /* Initialize the embedded gem-object. We return a single gem-reference
         * to the caller, instead of a normal nouveau_bo ttm reference. */
        ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
        if (ret) {
                drm_gem_object_release(&nvbo->bo.base);
                kfree(nvbo);
                return ret;
        }

        ret = nouveau_bo_init(nvbo, size, align, domain, NULL, NULL);
        if (ret) {
                nouveau_bo_ref(NULL, &nvbo);
                return ret;
        }

        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time.  not possible on
         * earlier chips without busting the ABI.
         */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                              NOUVEAU_GEM_DOMAIN_GART;
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                nvbo->valid_domains &= domain;

        *pnvbo = nvbo;
        return 0;
}

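/*
 * Fill in the drm_nouveau_gem_info reply for a BO: domain, GPU virtual
 * (or physical) offset, size, mmap handle and tiling state.
 */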
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
                 struct drm_nouveau_gem_info *rep)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
        struct nouveau_vma *vma;

        if (is_power_of_2(nvbo->valid_domains))
                rep->domain = nvbo->valid_domains;
        else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                rep->domain = NOUVEAU_GEM_DOMAIN_GART;
        else
                rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
        rep->offset = nvbo->offset;
        if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
                vma = nouveau_vma_find(nvbo, vmm);
                if (!vma)
                        return -EINVAL;

                rep->offset = vma->addr;
        }

        rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
        rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
        rep->tile_mode = nvbo->mode;
        rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
        if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
                rep->tile_flags |= nvbo->kind << 8;
        else
        if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
        else
                rep->tile_flags |= nvbo->zeta;
        return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        int ret = 0;

        ret = nouveau_gem_new(cli, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
                                    &req->info.handle);
        if (ret == 0) {
                ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
                if (ret)
                        drm_gem_handle_delete(file_priv, req->info.handle);
        }

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(&nvbo->bo.base);
        return ret;
}

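/*
 * Work out TTM placement for a BO from the userspace-supplied domain
 * masks, preferring whichever valid domain the BO already resides in
 * to avoid needless migration.
 */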
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
                       uint32_t write_domains, uint32_t valid_domains)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct ttm_buffer_object *bo = &nvbo->bo;
        uint32_t domains = valid_domains & nvbo->valid_domains &
                (write_domains ? write_domains : read_domains);
        uint32_t pref_domains = 0;

        if (!domains)
                return -EINVAL;

        valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);

        if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
            bo->mem.mem_type == TTM_PL_VRAM)
                pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

        else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
                 bo->mem.mem_type == TTM_PL_TT)
                pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

        else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
                pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

        else
                pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

        nouveau_bo_placement_set(nvbo, pref_domains, valid_domains);

        return 0;
}

struct validate_op {
        struct list_head list;
        struct ww_acquire_ctx ticket;
};

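/*
 * Release all BOs on a validate list: attach the submission fence (and
 * the per-VMA fence on NV50+), drop any kmap left over from reloc
 * patching, then unreserve and unreference each BO.
 */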
static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
                        struct nouveau_fence *fence,
                        struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
        struct nouveau_bo *nvbo;
        struct drm_nouveau_gem_pushbuf_bo *b;

        while (!list_empty(&op->list)) {
                nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
                b = &pbbo[nvbo->pbbo_index];

                if (likely(fence)) {
                        nouveau_bo_fence(nvbo, fence, !!b->write_domains);

                        if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
                                struct nouveau_vma *vma =
                                        (void *)(unsigned long)b->user_priv;
                                nouveau_fence_unref(&vma->fence);
                                dma_fence_get(&fence->base);
                                vma->fence = fence;
                        }
                }

                if (unlikely(nvbo->validate_mapped)) {
                        ttm_bo_kunmap(&nvbo->kmap);
                        nvbo->validate_mapped = false;
                }

                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve(&nvbo->bo);
                drm_gem_object_put(&nvbo->bo.base);
        }
}

static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
              struct nouveau_fence *fence,
              struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
        validate_fini_no_ticket(op, chan, fence, pbbo);
        ww_acquire_fini(&op->ticket);
}

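/*
 * Look up and reserve every BO referenced by the pushbuf under a single
 * ww-mutex acquire context.  On -EDEADLK the already-reserved BOs are
 * released and the contended BO is reserved via the slowpath before the
 * whole list is retried.
 */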
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
              struct drm_nouveau_gem_pushbuf_bo *pbbo,
              int nr_buffers, struct validate_op *op)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        int trycnt = 0;
        int ret = -EINVAL, i;
        struct nouveau_bo *res_bo = NULL;
        LIST_HEAD(gart_list);
        LIST_HEAD(vram_list);
        LIST_HEAD(both_list);

        ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
        if (++trycnt > 100000) {
                NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < nr_buffers; i++) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
                struct drm_gem_object *gem;
                struct nouveau_bo *nvbo;

                gem = drm_gem_object_lookup(file_priv, b->handle);
                if (!gem) {
                        NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
                        ret = -ENOENT;
                        break;
                }
                nvbo = nouveau_gem_object(gem);
                if (nvbo == res_bo) {
                        res_bo = NULL;
                        drm_gem_object_put(gem);
                        continue;
                }

                if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                        NV_PRINTK(err, cli, "multiple instances of buffer %d on "
                                      "validation list\n", b->handle);
                        drm_gem_object_put(gem);
                        ret = -EINVAL;
                        break;
                }

                ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
                if (ret) {
                        list_splice_tail_init(&vram_list, &op->list);
                        list_splice_tail_init(&gart_list, &op->list);
                        list_splice_tail_init(&both_list, &op->list);
                        validate_fini_no_ticket(op, chan, NULL, NULL);
                        if (unlikely(ret == -EDEADLK)) {
                                ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
                                                              &op->ticket);
                                if (!ret)
                                        res_bo = nvbo;
                        }
                        if (unlikely(ret)) {
                                if (ret != -ERESTARTSYS)
                                        NV_PRINTK(err, cli, "fail reserve\n");
                                break;
                        }
                }

                if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
                        struct nouveau_vmm *vmm = chan->vmm;
                        struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
                        if (!vma) {
                                NV_PRINTK(err, cli, "vma not found!\n");
                                ret = -EINVAL;
                                break;
                        }

                        b->user_priv = (uint64_t)(unsigned long)vma;
                } else {
                        b->user_priv = (uint64_t)(unsigned long)nvbo;
                }

                nvbo->reserved_by = file_priv;
                nvbo->pbbo_index = i;
                if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
                        list_add_tail(&nvbo->entry, &both_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                        list_add_tail(&nvbo->entry, &vram_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                        list_add_tail(&nvbo->entry, &gart_list);
                else {
                        NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
                                 b->valid_domains);
                        list_add_tail(&nvbo->entry, &both_list);
                        ret = -EINVAL;
                        break;
                }
                if (nvbo == res_bo)
                        goto retry;
        }

        ww_acquire_done(&op->ticket);
        list_splice_tail(&vram_list, &op->list);
        list_splice_tail(&gart_list, &op->list);
        list_splice_tail(&both_list, &op->list);
        if (ret)
                validate_fini(op, chan, NULL, NULL);
        return ret;
}

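/*
 * Validate each reserved BO: apply the requested placement, let TTM
 * migrate it if necessary, and synchronise against outstanding fences.
 * Returns the number of presumed-offset fixups (relocs) needed, or a
 * negative error code.
 */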
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
              struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
        struct nouveau_drm *drm = chan->drm;
        struct nouveau_bo *nvbo;
        int ret, relocs = 0;

        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

                ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
                if (unlikely(ret)) {
                        NV_PRINTK(err, cli, "fail set_domain\n");
                        return ret;
                }

                ret = nouveau_bo_validate(nvbo, true, false);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_PRINTK(err, cli, "fail ttm_validate\n");
                        return ret;
                }

                ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_PRINTK(err, cli, "fail post-validate sync\n");
                        return ret;
                }

                if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                        if (nvbo->offset == b->presumed.offset &&
                            ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
                             (nvbo->bo.mem.mem_type == TTM_PL_TT &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
                                continue;

                        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
                        else
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
                        b->presumed.offset = nvbo->offset;
                        b->presumed.valid = 0;
                        relocs++;
                }
        }

        return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
                             struct drm_file *file_priv,
                             struct drm_nouveau_gem_pushbuf_bo *pbbo,
                             int nr_buffers,
                             struct validate_op *op, bool *apply_relocs)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        int ret;

        INIT_LIST_HEAD(&op->list);

        if (nr_buffers == 0)
                return 0;

        ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
        if (unlikely(ret)) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validate_init\n");
                return ret;
        }

        ret = validate_list(chan, cli, &op->list, pbbo);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validating bo list\n");
                validate_fini(op, chan, NULL, NULL);
                return ret;
        } else if (ret > 0) {
                *apply_relocs = true;
        }

        return 0;
}

static inline void
u_free(void *addr)
{
        kvfree(addr);
}

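/*
 * Copy a userspace array into a kernel buffer.  Note the nmemb * size
 * multiplication is not overflow-checked here; all callers bound nmemb
 * by one of the NOUVEAU_GEM_MAX_* limits first.
 */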
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
        void *mem;
        void __user *userptr = (void __force __user *)(uintptr_t)user;

        size *= nmemb;

        mem = kvmalloc(size, GFP_KERNEL);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(mem, userptr, size)) {
                u_free(mem);
                return ERR_PTR(-EFAULT);
        }

        return mem;
}

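/*
 * Patch presumed offsets into the pushbuf: for each reloc whose target
 * BO moved, kmap the containing BO, wait for it to go idle, and write
 * the corrected address (low/high word, optionally OR'd with a
 * placement-dependent value).
 */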
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                                struct drm_nouveau_gem_pushbuf *req,
                                struct drm_nouveau_gem_pushbuf_reloc *reloc,
                                struct drm_nouveau_gem_pushbuf_bo *bo)
{
        int ret = 0;
        unsigned i;

        for (i = 0; i < req->nr_relocs; i++) {
                struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
                struct drm_nouveau_gem_pushbuf_bo *b;
                struct nouveau_bo *nvbo;
                uint32_t data;

                if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }

                b = &bo[r->bo_index];
                if (b->presumed.valid)
                        continue;

                if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }
                nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

                if (unlikely(r->reloc_bo_offset + 4 >
                             nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
                        NV_PRINTK(err, cli, "reloc outside of bo\n");
                        ret = -EINVAL;
                        break;
                }

                if (!nvbo->kmap.virtual) {
                        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
                                          &nvbo->kmap);
                        if (ret) {
                                NV_PRINTK(err, cli, "failed kmap for reloc\n");
                                break;
                        }
                        nvbo->validate_mapped = true;
                }

                if (r->flags & NOUVEAU_GEM_RELOC_LOW)
                        data = b->presumed.offset + r->data;
                else
                if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
                        data = (b->presumed.offset + r->data) >> 32;
                else
                        data = r->data;

                if (r->flags & NOUVEAU_GEM_RELOC_OR) {
                        if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
                                data |= r->tor;
                        else
                                data |= r->vor;
                }

                ret = ttm_bo_wait(&nvbo->bo, false, false);
                if (ret) {
                        NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
                        break;
                }

                nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
        }

        return ret;
}

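/*
 * Pushbuf submission ioctl: look up the target channel, copy in and
 * validate the push/buffer/reloc arrays, apply relocs if any presumed
 * offsets were stale, emit the pushes via IB, CALL or JUMP depending
 * on chipset, then fence the submission (optionally waiting for it).
 */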
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_abi16_chan *temp;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_nouveau_gem_pushbuf *req = data;
        struct drm_nouveau_gem_pushbuf_push *push;
        struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
        struct drm_nouveau_gem_pushbuf_bo *bo;
        struct nouveau_channel *chan = NULL;
        struct validate_op op;
        struct nouveau_fence *fence = NULL;
        int i, j, ret = 0;
        bool do_reloc = false, sync = false;

        if (unlikely(!abi16))
                return -ENOMEM;

        list_for_each_entry(temp, &abi16->channels, head) {
                if (temp->chan->chid == req->channel) {
                        chan = temp->chan;
                        break;
                }
        }

        if (!chan)
                return nouveau_abi16_put(abi16, -ENOENT);
        if (unlikely(atomic_read(&chan->killed)))
                return nouveau_abi16_put(abi16, -ENODEV);

        sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;

        req->vram_available = drm->gem.vram_available;
        req->gart_available = drm->gem.gart_available;
        if (unlikely(req->nr_push == 0))
                goto out_next;

        if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
                NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
                         req->nr_push, NOUVEAU_GEM_MAX_PUSH);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
                NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
                         req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
                NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
                         req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        push = u_memcpya(req->push, req->nr_push, sizeof(*push));
        if (IS_ERR(push))
                return nouveau_abi16_put(abi16, PTR_ERR(push));

        bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
        if (IS_ERR(bo)) {
                u_free(push);
                return nouveau_abi16_put(abi16, PTR_ERR(bo));
        }

        /* Ensure all push buffers are on validate list */
        for (i = 0; i < req->nr_push; i++) {
                if (push[i].bo_index >= req->nr_buffers) {
                        NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
                        ret = -EINVAL;
                        goto out_prevalid;
                }
        }

        /* Validate buffer list */
revalidate:
        ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
                                           req->nr_buffers, &op, &do_reloc);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validate: %d\n", ret);
                goto out_prevalid;
        }

        /* Apply any relocations that are required */
        if (do_reloc) {
                if (!reloc) {
                        validate_fini(&op, chan, NULL, bo);
                        reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
                        if (IS_ERR(reloc)) {
                                ret = PTR_ERR(reloc);
                                goto out_prevalid;
                        }

                        goto revalidate;
                }

                ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
                if (ret) {
                        NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
                        goto out;
                }
        }

        if (chan->dma.ib_max) {
                ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
                if (ret) {
                        NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_vma *vma = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        nv50_dma_push(chan, vma->addr + push[i].offset,
                                      push[i].length);
                }
        } else
        if (drm->client.device.info.chipset >= 0x25) {
                ret = PUSH_WAIT(chan->chan.push, req->nr_push * 2);
                if (ret) {
                        NV_PRINTK(err, cli, "cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        PUSH_CALL(chan->chan.push, nvbo->offset + push[i].offset);
                        PUSH_DATA(chan->chan.push, 0);
                }
        } else {
                ret = PUSH_WAIT(chan->chan.push, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
                if (ret) {
                        NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        uint32_t cmd;

                        cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
                        cmd |= 0x20000000;
                        if (unlikely(cmd != req->suffix0)) {
                                if (!nvbo->kmap.virtual) {
                                        ret = ttm_bo_kmap(&nvbo->bo, 0,
                                                          nvbo->bo.mem.num_pages,
                                                          &nvbo->kmap);
                                        if (ret) {
                                                WIND_RING(chan);
                                                goto out;
                                        }
                                        nvbo->validate_mapped = true;
                                }

                                nouveau_bo_wr32(nvbo, (push[i].offset +
                                                push[i].length - 8) / 4, cmd);
                        }

                        PUSH_JUMP(chan->chan.push, nvbo->offset + push[i].offset);
                        PUSH_DATA(chan->chan.push, 0);
                        for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
                                PUSH_DATA(chan->chan.push, 0);
                }
        }

        ret = nouveau_fence_new(chan, false, &fence);
        if (ret) {
                NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
                WIND_RING(chan);
                goto out;
        }

        if (sync) {
                if (!(ret = nouveau_fence_wait(fence, false, false))) {
                        if ((ret = dma_fence_get_status(&fence->base)) == 1)
                                ret = 0;
                }
        }

out:
        validate_fini(&op, chan, fence, bo);
        nouveau_fence_unref(&fence);

        if (do_reloc) {
                struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
                        u64_to_user_ptr(req->buffers);

                for (i = 0; i < req->nr_buffers; i++) {
                        if (bo[i].presumed.valid)
                                continue;

                        if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
                                         sizeof(bo[i].presumed))) {
                                ret = -EFAULT;
                                break;
                        }
                }
        }
out_prevalid:
        if (!IS_ERR(reloc))
                u_free(reloc);
        u_free(bo);
        u_free(push);

out_next:
        if (chan->dma.ib_max) {
                req->suffix0 = 0x00000000;
                req->suffix1 = 0x00000000;
        } else
        if (drm->client.device.info.chipset >= 0x25) {
                req->suffix0 = 0x00020000;
                req->suffix1 = 0x00000000;
        } else {
                req->suffix0 = 0x20000000 |
                              (chan->push.addr + ((chan->dma.cur + 2) << 2));
                req->suffix1 = 0x00000000;
        }

        return nouveau_abi16_put(abi16, ret);
}

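/*
 * cpu_prep ioctl: wait (optionally non-blocking) for all fences on the
 * BO's reservation object, then sync the backing pages for CPU access.
 */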
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_prep *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;
        bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
        bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
        long lret;
        int ret;

        gem = drm_gem_object_lookup(file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
                                         no_wait ? 0 : 30 * HZ);
        if (!lret)
                ret = -EBUSY;
        else if (lret > 0)
                ret = 0;
        else
                ret = lret;

        nouveau_bo_sync_for_cpu(nvbo);
        drm_gem_object_put(gem);

        return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_fini *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;

        gem = drm_gem_object_lookup(file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        nouveau_bo_sync_for_device(nvbo);
        drm_gem_object_put(gem);
        return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_nouveau_gem_info *req = data;
        struct drm_gem_object *gem;
        int ret;

        gem = drm_gem_object_lookup(file_priv, req->handle);
        if (!gem)
                return -ENOENT;

        ret = nouveau_gem_info(file_priv, gem, req);
        drm_gem_object_put(gem);
        return ret;
}