drivers/gpu/drm/nouveau/nouveau_gem.c
/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>

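/* GEM object destructor: detach any dma-buf import, release the GEM core
 * state and drop the underlying TTM buffer-object reference, with the
 * device runtime-resumed while the teardown runs.
 */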
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct device *dev = drm->dev->dev;
        int ret;

        ret = pm_runtime_get_sync(dev);
        if (WARN_ON(ret < 0 && ret != -EACCES))
                return;

        if (gem->import_attach)
                drm_prime_gem_destroy(gem, nvbo->bo.sg);

        drm_gem_object_release(gem);

        /* reset filp so nouveau_bo_del_ttm() can test for it */
        gem->filp = NULL;
        ttm_bo_put(&nvbo->bo);

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
}

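/* Called when a client obtains a handle to this object: on NV50 and later,
 * map the buffer into the client's (or SVM) address space by creating a
 * per-client VMA.
 */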
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct device *dev = drm->dev->dev;
        struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
        struct nouveau_vma *vma;
        int ret;

        if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
                return 0;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return ret;

        ret = pm_runtime_get_sync(dev);
        if (ret < 0 && ret != -EACCES)
                goto out;

        ret = nouveau_vma_new(nvbo, vmm, &vma);
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
out:
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

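/* VMA teardown helpers: if the mapping is still busy (it carries a fence),
 * defer the teardown to the client's work handler until that fence signals;
 * otherwise tear the VMA down immediately.
 */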
struct nouveau_gem_object_unmap {
        struct nouveau_cli_work work;
        struct nouveau_vma *vma;
};

static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
        nouveau_fence_unref(&vma->fence);
        nouveau_vma_del(&vma);
}

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
        struct nouveau_gem_object_unmap *work =
                container_of(w, typeof(*work), work);
        nouveau_gem_object_delete(work->vma);
        kfree(work);
}

static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
        struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
        struct nouveau_gem_object_unmap *work;

        list_del_init(&vma->head);

        if (!fence) {
                nouveau_gem_object_delete(vma);
                return;
        }

        if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
                WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
                nouveau_gem_object_delete(vma);
                return;
        }

        work->work.func = nouveau_gem_object_delete_work;
        work->vma = vma;
        nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}

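/* Called when a client drops its handle: release the per-client VMA
 * reference taken in nouveau_gem_object_open() and unmap the VMA once the
 * last reference goes away.
 */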
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
        struct device *dev = drm->dev->dev;
        struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
        struct nouveau_vma *vma;
        int ret;

        if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
                return;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
        if (ret)
                return;

        vma = nouveau_vma_find(nvbo, vmm);
        if (vma) {
                if (--vma->refs == 0) {
                        ret = pm_runtime_get_sync(dev);
                        if (!WARN_ON(ret < 0 && ret != -EACCES)) {
                                nouveau_gem_object_unmap(nvbo, vma);
                                pm_runtime_mark_last_busy(dev);
                                pm_runtime_put_autosuspend(dev);
                        }
                }
        }
        ttm_bo_unreserve(&nvbo->bo);
}

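/* Allocate a new buffer object with an embedded GEM object, placed
 * according to the requested domains.  The single reference handed back to
 * the caller is the GEM reference, not a bare TTM reference.
 */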
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
                uint32_t tile_mode, uint32_t tile_flags,
                struct nouveau_bo **pnvbo)
{
        struct nouveau_drm *drm = cli->drm;
        struct nouveau_bo *nvbo;
        u32 flags = 0;
        int ret;

        if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
                flags |= TTM_PL_FLAG_VRAM;
        if (domain & NOUVEAU_GEM_DOMAIN_GART)
                flags |= TTM_PL_FLAG_TT;
        if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
                flags |= TTM_PL_FLAG_SYSTEM;

        if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
                flags |= TTM_PL_FLAG_UNCACHED;

        ret = nouveau_bo_new(cli, size, align, flags, tile_mode,
                             tile_flags, NULL, NULL, pnvbo);
        if (ret)
                return ret;
        nvbo = *pnvbo;

        /* we restrict allowed domains on nv50+ to only the types
         * that were requested at creation time.  not possible on
         * earlier chips without busting the ABI.
         */
        nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
                              NOUVEAU_GEM_DOMAIN_GART;
        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                nvbo->valid_domains &= domain;

        /* Initialize the embedded gem-object. We return a single gem-reference
         * to the caller, instead of a normal nouveau_bo ttm reference. */
        ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, nvbo->bo.mem.size);
        if (ret) {
                nouveau_bo_ref(NULL, pnvbo);
                return -ENOMEM;
        }

        nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
        return 0;
}

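/* Fill in the drm_nouveau_gem_info reply for a buffer: domain, GPU virtual
 * (or TTM) offset, size, mmap handle and tiling state.
 */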
static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
                 struct drm_nouveau_gem_info *rep)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
        struct nouveau_vma *vma;

        if (is_power_of_2(nvbo->valid_domains))
                rep->domain = nvbo->valid_domains;
        else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                rep->domain = NOUVEAU_GEM_DOMAIN_GART;
        else
                rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
        rep->offset = nvbo->bo.offset;
        if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
                vma = nouveau_vma_find(nvbo, vmm);
                if (!vma)
                        return -EINVAL;

                rep->offset = vma->addr;
        }

        rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
        rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
        rep->tile_mode = nvbo->mode;
        rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
        if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
                rep->tile_flags |= nvbo->kind << 8;
        else
        if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
                rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
        else
                rep->tile_flags |= nvbo->zeta;
        return 0;
}

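/* DRM_NOUVEAU_GEM_NEW: allocate a buffer object, create a handle for the
 * calling client and return the object's info to userspace.
 */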
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct drm_nouveau_gem_new *req = data;
        struct nouveau_bo *nvbo = NULL;
        int ret = 0;

        ret = nouveau_gem_new(cli, req->info.size, req->align,
                              req->info.domain, req->info.tile_mode,
                              req->info.tile_flags, &nvbo);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
                                    &req->info.handle);
        if (ret == 0) {
                ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
                if (ret)
                        drm_gem_handle_delete(file_priv, req->info.handle);
        }

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(&nvbo->bo.base);
        return ret;
}

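/* Translate the read/write/valid domain masks from userspace into a TTM
 * placement for validation, preferring wherever the buffer already resides
 * to avoid needless migration.
 */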
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
                       uint32_t write_domains, uint32_t valid_domains)
{
        struct nouveau_bo *nvbo = nouveau_gem_object(gem);
        struct ttm_buffer_object *bo = &nvbo->bo;
        uint32_t domains = valid_domains & nvbo->valid_domains &
                (write_domains ? write_domains : read_domains);
        uint32_t pref_flags = 0, valid_flags = 0;

        if (!domains)
                return -EINVAL;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                valid_flags |= TTM_PL_FLAG_VRAM;

        if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                valid_flags |= TTM_PL_FLAG_TT;

        if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
            bo->mem.mem_type == TTM_PL_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
                 bo->mem.mem_type == TTM_PL_TT)
                pref_flags |= TTM_PL_FLAG_TT;

        else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
                pref_flags |= TTM_PL_FLAG_VRAM;

        else
                pref_flags |= TTM_PL_FLAG_TT;

        nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

        return 0;
}

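/* Per-submission validation state: the list of reserved buffers and the
 * ww_mutex acquire context used to lock them.  The fini helpers fence each
 * buffer (when a fence is supplied), drop any validation-time kernel
 * mapping, unreserve it and put the reference taken by validate_init().
 */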
struct validate_op {
        struct list_head list;
        struct ww_acquire_ctx ticket;
};

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
                        struct nouveau_fence *fence,
                        struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
        struct nouveau_bo *nvbo;
        struct drm_nouveau_gem_pushbuf_bo *b;

        while (!list_empty(&op->list)) {
                nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
                b = &pbbo[nvbo->pbbo_index];

                if (likely(fence)) {
                        nouveau_bo_fence(nvbo, fence, !!b->write_domains);

                        if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
                                struct nouveau_vma *vma =
                                        (void *)(unsigned long)b->user_priv;
                                nouveau_fence_unref(&vma->fence);
                                dma_fence_get(&fence->base);
                                vma->fence = fence;
                        }
                }

                if (unlikely(nvbo->validate_mapped)) {
                        ttm_bo_kunmap(&nvbo->kmap);
                        nvbo->validate_mapped = false;
                }

                list_del(&nvbo->entry);
                nvbo->reserved_by = NULL;
                ttm_bo_unreserve(&nvbo->bo);
                drm_gem_object_put_unlocked(&nvbo->bo.base);
        }
}

static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
              struct nouveau_fence *fence,
              struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
        validate_fini_no_ticket(op, chan, fence, pbbo);
        ww_acquire_fini(&op->ticket);
}

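/* Look up and reserve every buffer on the pushbuf's validation list,
 * retrying with the ww_mutex slowpath on -EDEADLK.  Buffers are sorted onto
 * VRAM-only, GART-only and either-domain lists before being spliced onto
 * op->list.
 */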
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
              struct drm_nouveau_gem_pushbuf_bo *pbbo,
              int nr_buffers, struct validate_op *op)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        int trycnt = 0;
        int ret = -EINVAL, i;
        struct nouveau_bo *res_bo = NULL;
        LIST_HEAD(gart_list);
        LIST_HEAD(vram_list);
        LIST_HEAD(both_list);

        ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
        if (++trycnt > 100000) {
                NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
                return -EINVAL;
        }

        for (i = 0; i < nr_buffers; i++) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
                struct drm_gem_object *gem;
                struct nouveau_bo *nvbo;

                gem = drm_gem_object_lookup(file_priv, b->handle);
                if (!gem) {
                        NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
                        ret = -ENOENT;
                        break;
                }
                nvbo = nouveau_gem_object(gem);
                if (nvbo == res_bo) {
                        res_bo = NULL;
                        drm_gem_object_put_unlocked(gem);
                        continue;
                }

                if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
                        NV_PRINTK(err, cli, "multiple instances of buffer %d on "
                                      "validation list\n", b->handle);
                        drm_gem_object_put_unlocked(gem);
                        ret = -EINVAL;
                        break;
                }

                ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
                if (ret) {
                        list_splice_tail_init(&vram_list, &op->list);
                        list_splice_tail_init(&gart_list, &op->list);
                        list_splice_tail_init(&both_list, &op->list);
                        validate_fini_no_ticket(op, chan, NULL, NULL);
                        if (unlikely(ret == -EDEADLK)) {
                                ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
                                                              &op->ticket);
                                if (!ret)
                                        res_bo = nvbo;
                        }
                        if (unlikely(ret)) {
                                if (ret != -ERESTARTSYS)
                                        NV_PRINTK(err, cli, "fail reserve\n");
                                break;
                        }
                }

                if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
                        struct nouveau_vmm *vmm = chan->vmm;
                        struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
                        if (!vma) {
                                NV_PRINTK(err, cli, "vma not found!\n");
                                ret = -EINVAL;
                                break;
                        }

                        b->user_priv = (uint64_t)(unsigned long)vma;
                } else {
                        b->user_priv = (uint64_t)(unsigned long)nvbo;
                }

                nvbo->reserved_by = file_priv;
                nvbo->pbbo_index = i;
                if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
                    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
                        list_add_tail(&nvbo->entry, &both_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
                        list_add_tail(&nvbo->entry, &vram_list);
                else
                if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
                        list_add_tail(&nvbo->entry, &gart_list);
                else {
                        NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
                                 b->valid_domains);
                        list_add_tail(&nvbo->entry, &both_list);
                        ret = -EINVAL;
                        break;
                }
                if (nvbo == res_bo)
                        goto retry;
        }

        ww_acquire_done(&op->ticket);
        list_splice_tail(&vram_list, &op->list);
        list_splice_tail(&gart_list, &op->list);
        list_splice_tail(&both_list, &op->list);
        if (ret)
                validate_fini(op, chan, NULL, NULL);
        return ret;

}

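/* Validate each reserved buffer into an allowed placement, synchronise with
 * its previous fences, and (pre-NV50) write back updated presumed offsets
 * so userspace can patch its pushbuf.  Returns the number of entries whose
 * presumed state changed, or a negative error code.
 */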
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
              struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
              uint64_t user_pbbo_ptr)
{
        struct nouveau_drm *drm = chan->drm;
        struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
                                (void __force __user *)(uintptr_t)user_pbbo_ptr;
        struct nouveau_bo *nvbo;
        int ret, relocs = 0;

        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

                ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
                if (unlikely(ret)) {
                        NV_PRINTK(err, cli, "fail set_domain\n");
                        return ret;
                }

                ret = nouveau_bo_validate(nvbo, true, false);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_PRINTK(err, cli, "fail ttm_validate\n");
                        return ret;
                }

                ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
                if (unlikely(ret)) {
                        if (ret != -ERESTARTSYS)
                                NV_PRINTK(err, cli, "fail post-validate sync\n");
                        return ret;
                }

                if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                        if (nvbo->bo.offset == b->presumed.offset &&
                            ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
                             (nvbo->bo.mem.mem_type == TTM_PL_TT &&
                              b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
                                continue;

                        if (nvbo->bo.mem.mem_type == TTM_PL_TT)
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
                        else
                                b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
                        b->presumed.offset = nvbo->bo.offset;
                        b->presumed.valid = 0;
                        relocs++;

                        if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
                                             &b->presumed, sizeof(b->presumed)))
                                return -EFAULT;
                }
        }

        return relocs;
}

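/* Reserve and validate the whole buffer list for a pushbuf submission.  On
 * success *apply_relocs holds the number of relocations that must be
 * applied before the pushbuf can be submitted.
 */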
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
                             struct drm_file *file_priv,
                             struct drm_nouveau_gem_pushbuf_bo *pbbo,
                             uint64_t user_buffers, int nr_buffers,
                             struct validate_op *op, int *apply_relocs)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        int ret;

        INIT_LIST_HEAD(&op->list);

        if (nr_buffers == 0)
                return 0;

        ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
        if (unlikely(ret)) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validate_init\n");
                return ret;
        }

        ret = validate_list(chan, cli, &op->list, pbbo, user_buffers);
        if (unlikely(ret < 0)) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validating bo list\n");
                validate_fini(op, chan, NULL, NULL);
                return ret;
        }
        *apply_relocs = ret;
        return 0;
}

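/* Helpers for copying variable-length ioctl arrays from userspace into
 * kernel memory; callers bound nmemb via the NOUVEAU_GEM_MAX_* limits
 * before calling u_memcpya().
 */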
static inline void
u_free(void *addr)
{
        kvfree(addr);
}

static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
        void *mem;
        void __user *userptr = (void __force __user *)(uintptr_t)user;

        size *= nmemb;

        mem = kvmalloc(size, GFP_KERNEL);
        if (!mem)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(mem, userptr, size)) {
                u_free(mem);
                return ERR_PTR(-EFAULT);
        }

        return mem;
}

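/* Apply userspace-supplied relocations: for every entry whose presumed
 * state is stale, compute the new value from the buffer's validated offset
 * and patch it into the target buffer through a kernel mapping.
 */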
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
                                struct drm_nouveau_gem_pushbuf *req,
                                struct drm_nouveau_gem_pushbuf_bo *bo)
{
        struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
        int ret = 0;
        unsigned i;

        reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
        if (IS_ERR(reloc))
                return PTR_ERR(reloc);

        for (i = 0; i < req->nr_relocs; i++) {
                struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
                struct drm_nouveau_gem_pushbuf_bo *b;
                struct nouveau_bo *nvbo;
                uint32_t data;

                if (unlikely(r->bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }

                b = &bo[r->bo_index];
                if (b->presumed.valid)
                        continue;

                if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
                        NV_PRINTK(err, cli, "reloc container bo index invalid\n");
                        ret = -EINVAL;
                        break;
                }
                nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

                if (unlikely(r->reloc_bo_offset + 4 >
                             nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
                        NV_PRINTK(err, cli, "reloc outside of bo\n");
                        ret = -EINVAL;
                        break;
                }

                if (!nvbo->kmap.virtual) {
                        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
                                          &nvbo->kmap);
                        if (ret) {
                                NV_PRINTK(err, cli, "failed kmap for reloc\n");
                                break;
                        }
                        nvbo->validate_mapped = true;
                }

                if (r->flags & NOUVEAU_GEM_RELOC_LOW)
                        data = b->presumed.offset + r->data;
                else
                if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
                        data = (b->presumed.offset + r->data) >> 32;
                else
                        data = r->data;

                if (r->flags & NOUVEAU_GEM_RELOC_OR) {
                        if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
                                data |= r->tor;
                        else
                                data |= r->vor;
                }

                ret = ttm_bo_wait(&nvbo->bo, false, false);
                if (ret) {
                        NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
                        break;
                }

                nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
        }

        u_free(reloc);
        return ret;
}

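/* DRM_NOUVEAU_GEM_PUSHBUF: the main command submission ioctl.  Copies the
 * push, buffer and reloc arrays from userspace, validates every buffer,
 * applies relocations if needed, submits the pushes to the channel and
 * fences the buffers before releasing them.
 */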
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_abi16_chan *temp;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct drm_nouveau_gem_pushbuf *req = data;
        struct drm_nouveau_gem_pushbuf_push *push;
        struct drm_nouveau_gem_pushbuf_bo *bo;
        struct nouveau_channel *chan = NULL;
        struct validate_op op;
        struct nouveau_fence *fence = NULL;
        int i, j, ret = 0, do_reloc = 0;

        if (unlikely(!abi16))
                return -ENOMEM;

        list_for_each_entry(temp, &abi16->channels, head) {
                if (temp->chan->chid == req->channel) {
                        chan = temp->chan;
                        break;
                }
        }

        if (!chan)
                return nouveau_abi16_put(abi16, -ENOENT);

        req->vram_available = drm->gem.vram_available;
        req->gart_available = drm->gem.gart_available;
        if (unlikely(req->nr_push == 0))
                goto out_next;

        if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
                NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
                         req->nr_push, NOUVEAU_GEM_MAX_PUSH);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
                NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
                         req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
                NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
                         req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
                return nouveau_abi16_put(abi16, -EINVAL);
        }

        push = u_memcpya(req->push, req->nr_push, sizeof(*push));
        if (IS_ERR(push))
                return nouveau_abi16_put(abi16, PTR_ERR(push));

        bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
        if (IS_ERR(bo)) {
                u_free(push);
                return nouveau_abi16_put(abi16, PTR_ERR(bo));
        }

        /* Ensure all push buffers are on validate list */
        for (i = 0; i < req->nr_push; i++) {
                if (push[i].bo_index >= req->nr_buffers) {
                        NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
                        ret = -EINVAL;
                        goto out_prevalid;
                }
        }

        /* Validate buffer list */
        ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
                                           req->nr_buffers, &op, &do_reloc);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        NV_PRINTK(err, cli, "validate: %d\n", ret);
                goto out_prevalid;
        }

        /* Apply any relocations that are required */
        if (do_reloc) {
                ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
                if (ret) {
                        NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
                        goto out;
                }
        }

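        /* Submit the pushes: through the IB ring when the channel has one,
         * otherwise with call commands on chipset >= 0x25, or by patching
         * jump commands into the user pushbuf on older hardware.
         */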
        if (chan->dma.ib_max) {
                ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
                if (ret) {
                        NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_vma *vma = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        nv50_dma_push(chan, vma->addr + push[i].offset,
                                      push[i].length);
                }
        } else
        if (drm->client.device.info.chipset >= 0x25) {
                ret = RING_SPACE(chan, req->nr_push * 2);
                if (ret) {
                        NV_PRINTK(err, cli, "cal_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;

                        OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
                        OUT_RING(chan, 0);
                }
        } else {
                ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
                if (ret) {
                        NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
                        goto out;
                }

                for (i = 0; i < req->nr_push; i++) {
                        struct nouveau_bo *nvbo = (void *)(unsigned long)
                                bo[push[i].bo_index].user_priv;
                        uint32_t cmd;

                        cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
                        cmd |= 0x20000000;
                        if (unlikely(cmd != req->suffix0)) {
                                if (!nvbo->kmap.virtual) {
                                        ret = ttm_bo_kmap(&nvbo->bo, 0,
                                                          nvbo->bo.mem.num_pages,
                                                          &nvbo->kmap);
                                        if (ret) {
                                                WIND_RING(chan);
                                                goto out;
                                        }
                                        nvbo->validate_mapped = true;
                                }

                                nouveau_bo_wr32(nvbo, (push[i].offset +
                                                push[i].length - 8) / 4, cmd);
                        }

                        OUT_RING(chan, 0x20000000 |
                                      (nvbo->bo.offset + push[i].offset));
                        OUT_RING(chan, 0);
                        for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
                                OUT_RING(chan, 0);
                }
        }

        ret = nouveau_fence_new(chan, false, &fence);
        if (ret) {
                NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
                WIND_RING(chan);
                goto out;
        }

out:
        validate_fini(&op, chan, fence, bo);
        nouveau_fence_unref(&fence);

out_prevalid:
        u_free(bo);
        u_free(push);

out_next:
        if (chan->dma.ib_max) {
                req->suffix0 = 0x00000000;
                req->suffix1 = 0x00000000;
        } else
        if (drm->client.device.info.chipset >= 0x25) {
                req->suffix0 = 0x00020000;
                req->suffix1 = 0x00000000;
        } else {
                req->suffix0 = 0x20000000 |
                              (chan->push.addr + ((chan->dma.cur + 2) << 2));
                req->suffix1 = 0x00000000;
        }

        return nouveau_abi16_put(abi16, ret);
}

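/* DRM_NOUVEAU_GEM_CPU_PREP: wait (or poll, with NOWAIT) for GPU access to
 * finish and make the buffer coherent for CPU access.
 */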
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_prep *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;
        bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
        bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
        long lret;
        int ret;

        gem = drm_gem_object_lookup(file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
                                                   no_wait ? 0 : 30 * HZ);
        if (!lret)
                ret = -EBUSY;
        else if (lret > 0)
                ret = 0;
        else
                ret = lret;

        nouveau_bo_sync_for_cpu(nvbo);
        drm_gem_object_put_unlocked(gem);

        return ret;
}

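/* DRM_NOUVEAU_GEM_CPU_FINI: CPU access is done, flush caches and hand the
 * buffer back to the device.
 */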
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_nouveau_gem_cpu_fini *req = data;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;

        gem = drm_gem_object_lookup(file_priv, req->handle);
        if (!gem)
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);

        nouveau_bo_sync_for_device(nvbo);
        drm_gem_object_put_unlocked(gem);
        return 0;
}

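/* DRM_NOUVEAU_GEM_INFO: return placement, offset, size and tiling
 * information for an existing handle.
 */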
int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_nouveau_gem_info *req = data;
        struct drm_gem_object *gem;
        int ret;

        gem = drm_gem_object_lookup(file_priv, req->handle);
        if (!gem)
                return -ENOENT;

        ret = nouveau_gem_info(file_priv, gem, req);
        drm_gem_object_put_unlocked(gem);
        return ret;
}