// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

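/*
 * Shmem and userptr objects nest etnaviv_obj->lock differently against
 * mmap_sem (userptr takes mmap_sem while holding the object lock, shmem
 * objects fault with mmap_sem already held), so each type gets its own
 * lockdep class to avoid false positive lock ordering warnings.
 */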
static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent:
         *
         * WARNING: The DMA API does not support concurrent CPU
         * and device access to the memory area.  With BIDIRECTIONAL,
         * we will clean the cache lines which overlap the region,
         * and invalidate all cache lines (partially) contained in
         * the region.
         *
         * If you have dirty data in the overlapping cache lines,
         * that will corrupt the GPU-written data.  If you have
         * written into the remainder of the region, this can
         * discard those writes.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

        if (IS_ERR(p)) {
                dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
                return PTR_ERR(p);
        }

        etnaviv_obj->pages = p;

        return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
                etnaviv_obj->sgt = NULL;
        }
        if (etnaviv_obj->pages) {
                drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
                                  true, false);

                etnaviv_obj->pages = NULL;
        }
}

struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        if (!etnaviv_obj->pages) {
                ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
                if (ret < 0)
                        return ERR_PTR(ret);
        }

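        /*
         * Lazily create and map the scatter/gather table on first use;
         * it remains valid until the backing pages are released.
         */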
        if (!etnaviv_obj->sgt) {
                struct drm_device *dev = etnaviv_obj->base.dev;
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
                struct sg_table *sgt;

                sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
                if (IS_ERR(sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt: %ld\n",
                                PTR_ERR(sgt));
                        return ERR_CAST(sgt);
                }

                etnaviv_obj->sgt = sgt;

                etnaviv_gem_scatter_map(etnaviv_obj);
        }

        return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        lockdep_assert_held(&etnaviv_obj->lock);
        /* when we start tracking the pin count, then do something here */
}

static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        pgprot_t vm_page_prot;

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        vm_page_prot = vm_get_page_prot(vma->vm_flags);

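        /* Derive the CPU mapping attributes from the BO's cache mode. */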
        if (etnaviv_obj->flags & ETNA_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
        } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_page_prot);
        } else {
                /*
                 * Shunt off cached objs to shmem file so they have their own
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
                fput(vma->vm_file);
                get_file(etnaviv_obj->base.filp);
                vma->vm_pgoff = 0;
                vma->vm_file  = etnaviv_obj->base.filp;

                vma->vm_page_prot = vm_page_prot;
        }

        return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct etnaviv_gem_object *obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret) {
                DBG("mmap failed: %d", ret);
                return ret;
        }

        obj = to_etnaviv_bo(vma->vm_private_data);
        return obj->ops->mmap(obj, vma);
}

vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct page **pages, *page;
        pgoff_t pgoff;
        int err;

        /*
         * Make sure we don't parallel update on a fault, nor move or remove
         * something from beneath our feet.  Note that vmf_insert_page() is
         * specifically coded to take care of this, so we don't have to.
         */
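        /*
         * If a signal interrupts the lock attempt, return VM_FAULT_NOPAGE
         * so the fault is retried once the signal has been handled.
         */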
        err = mutex_lock_interruptible(&etnaviv_obj->lock);
        if (err)
                return VM_FAULT_NOPAGE;
        /* make sure we have pages attached now */
        pages = etnaviv_gem_get_pages(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        if (IS_ERR(pages)) {
                err = PTR_ERR(pages);
                return vmf_error(err);
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        page = pages[pgoff];

        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
             page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

        return vmf_insert_page(vma, vmf->address, page);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
        int ret;

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                dev_err(obj->dev->dev, "could not allocate mmap offset\n");
        else
                *offset = drm_vma_node_offset_addr(&obj->vma_node);

        return ret;
}

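/*
 * Find an existing mapping of this object in the given MMU context; a
 * NULL mmu matches mappings that have been reaped and may be re-used.
 */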
static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
                             struct etnaviv_iommu *mmu)
{
        struct etnaviv_vram_mapping *mapping;

        list_for_each_entry(mapping, &obj->vram_list, obj_node) {
                if (mapping->mmu == mmu)
                        return mapping;
        }

        return NULL;
}

void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        drm_gem_object_get(&etnaviv_obj->base);

        mutex_lock(&etnaviv_obj->lock);
        WARN_ON(mapping->use == 0);
        mapping->use += 1;
        mutex_unlock(&etnaviv_obj->lock);
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        mutex_lock(&etnaviv_obj->lock);
        WARN_ON(mapping->use == 0);
        mapping->use -= 1;
        mutex_unlock(&etnaviv_obj->lock);

        drm_gem_object_put_unlocked(&etnaviv_obj->base);
}

struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
        struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_vram_mapping *mapping;
        struct page **pages;
        int ret = 0;

        mutex_lock(&etnaviv_obj->lock);
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
        if (mapping) {
                /*
                 * Holding the object lock prevents the use count changing
                 * beneath us.  If the use count is zero, the MMU might be
                 * reaping this object, so take the lock and re-check that
                 * the MMU owns this mapping to close this race.
                 */
                if (mapping->use == 0) {
                        mutex_lock(&gpu->mmu->lock);
                        if (mapping->mmu == gpu->mmu)
                                mapping->use += 1;
                        else
                                mapping = NULL;
                        mutex_unlock(&gpu->mmu->lock);
                        if (mapping)
                                goto out;
                } else {
                        mapping->use += 1;
                        goto out;
                }
        }

        pages = etnaviv_gem_get_pages(etnaviv_obj);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out;
        }

        /*
         * See if we have a reaped vram mapping we can re-use before
         * allocating a fresh mapping.
         */
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
        if (!mapping) {
                mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
                if (!mapping) {
                        ret = -ENOMEM;
                        goto out;
                }

                INIT_LIST_HEAD(&mapping->scan_node);
                mapping->object = etnaviv_obj;
        } else {
                list_del(&mapping->obj_node);
        }

        mapping->mmu = gpu->mmu;
        mapping->use = 1;

        ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
                                    mapping);
        if (ret < 0)
                kfree(mapping);
        else
                list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);

out:
        mutex_unlock(&etnaviv_obj->lock);

        if (ret)
                return ERR_PTR(ret);

        /* Take a reference on the object */
        drm_gem_object_get(obj);
        return mapping;
}

void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->vaddr)
                return etnaviv_obj->vaddr;

        mutex_lock(&etnaviv_obj->lock);
        /*
         * Need to check again, as we might have raced with another thread
         * while waiting for the mutex.
         */
        if (!etnaviv_obj->vaddr)
                etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
        struct page **pages;

        lockdep_assert_held(&obj->lock);

        pages = etnaviv_gem_get_pages(obj);
        if (IS_ERR(pages))
                return NULL;

        return vmap(pages, obj->base.size >> PAGE_SHIFT,
                        VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

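/*
 * Translate the ETNA_PREP_* access flags into the DMA direction used
 * for cache maintenance.
 */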
static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
        if (op & ETNA_PREP_READ)
                return DMA_FROM_DEVICE;
        else if (op & ETNA_PREP_WRITE)
                return DMA_TO_DEVICE;
        else
                return DMA_BIDIRECTIONAL;
}

int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
                struct timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct drm_device *dev = obj->dev;
        bool write = !!(op & ETNA_PREP_WRITE);
        int ret;

        if (!etnaviv_obj->sgt) {
                struct page **pages;

                mutex_lock(&etnaviv_obj->lock);
                pages = etnaviv_gem_get_pages(etnaviv_obj);
                mutex_unlock(&etnaviv_obj->lock);
                if (IS_ERR(pages))
                        return PTR_ERR(pages);
        }

        if (op & ETNA_PREP_NOSYNC) {
                if (!reservation_object_test_signaled_rcu(obj->resv,
                                                          write))
                        return -EBUSY;
        } else {
                unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

                ret = reservation_object_wait_timeout_rcu(obj->resv,
                                                          write, true, remain);
                if (ret <= 0)
                        return ret == 0 ? -ETIMEDOUT : ret;
        }

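        /*
         * Cached BOs need a CPU cache sync before CPU access; remember the
         * op so etnaviv_gem_cpu_fini() can sync back in the matching
         * direction.
         */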
        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
                                    etnaviv_obj->sgt->nents,
                                    etnaviv_op_to_dma_dir(op));
                etnaviv_obj->last_cpu_prep_op = op;
        }

        return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                /* fini without a prep is almost certainly a userspace error */
                WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
                dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
                        etnaviv_obj->sgt->nents,
                        etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
                etnaviv_obj->last_cpu_prep_op = 0;
        }

        return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
        struct timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
        const char *type, struct seq_file *m)
{
        if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                seq_printf(m, "\t%9s: %s %s seq %llu\n",
                           type,
                           fence->ops->get_driver_name(fence),
                           fence->ops->get_timeline_name(fence),
                           fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct reservation_object *robj = obj->resv;
        struct reservation_object_list *fobj;
        struct dma_fence *fence;
        unsigned long off = drm_vma_node_start(&obj->vma_node);

        seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
                        etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
                        obj->name, kref_read(&obj->refcount),
                        off, etnaviv_obj->vaddr, obj->size);

        rcu_read_lock();
        fobj = rcu_dereference(robj->fence);
        if (fobj) {
                unsigned int i, shared_count = fobj->shared_count;

                for (i = 0; i < shared_count; i++) {
                        fence = rcu_dereference(fobj->shared[i]);
                        etnaviv_gem_describe_fence(fence, "Shared", m);
                }
        }

        fence = rcu_dereference(robj->fence_excl);
        if (fence)
                etnaviv_gem_describe_fence(fence, "Exclusive", m);
        rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
        struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int count = 0;
        size_t size = 0;

        mutex_lock(&priv->gem_lock);
        list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
                struct drm_gem_object *obj = &etnaviv_obj->base;

                seq_puts(m, "   ");
                etnaviv_gem_describe(obj, m);
                count++;
                size += obj->size;
        }
        mutex_unlock(&priv->gem_lock);

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
        vunmap(etnaviv_obj->vaddr);
        put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
        .get_pages = etnaviv_gem_shmem_get_pages,
        .release = etnaviv_gem_shmem_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_mmap_obj,
};

void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_drm_private *priv = obj->dev->dev_private;
        struct etnaviv_vram_mapping *mapping, *tmp;

        /* object should not be active */
        WARN_ON(is_active(etnaviv_obj));

        mutex_lock(&priv->gem_lock);
        list_del(&etnaviv_obj->gem_node);
        mutex_unlock(&priv->gem_lock);

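        /*
         * Tear down any remaining VRAM mappings; reaped mappings have a
         * NULL mmu and only need their memory freed.
         */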
        list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
                                 obj_node) {
                struct etnaviv_iommu *mmu = mapping->mmu;

                WARN_ON(mapping->use);

                if (mmu)
                        etnaviv_iommu_unmap_gem(mmu, mapping);

                list_del(&mapping->obj_node);
                kfree(mapping);
        }

        drm_gem_free_mmap_offset(obj);
        etnaviv_obj->ops->release(etnaviv_obj);
        drm_gem_object_release(obj);

        kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        mutex_lock(&priv->gem_lock);
        list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
        mutex_unlock(&priv->gem_lock);
}

static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
        struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
        struct drm_gem_object **obj)
{
        struct etnaviv_gem_object *etnaviv_obj;
        unsigned sz = sizeof(*etnaviv_obj);
        bool valid = true;

        /* validate flags */
        switch (flags & ETNA_BO_CACHE_MASK) {
        case ETNA_BO_UNCACHED:
        case ETNA_BO_CACHED:
        case ETNA_BO_WC:
                break;
        default:
                valid = false;
        }

        if (!valid) {
                dev_err(dev->dev, "invalid cache flag: %x\n",
                        (flags & ETNA_BO_CACHE_MASK));
                return -EINVAL;
        }

        etnaviv_obj = kzalloc(sz, GFP_KERNEL);
        if (!etnaviv_obj)
                return -ENOMEM;

        etnaviv_obj->flags = flags;
        etnaviv_obj->ops = ops;
        if (robj)
                etnaviv_obj->base.resv = robj;

        mutex_init(&etnaviv_obj->lock);
        INIT_LIST_HEAD(&etnaviv_obj->vram_list);

        *obj = &etnaviv_obj->base;

        return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
        u32 size, u32 flags, u32 *handle)
{
        struct drm_gem_object *obj = NULL;
        int ret;

        size = PAGE_ALIGN(size);

        ret = etnaviv_gem_new_impl(dev, size, flags, NULL,
                                   &etnaviv_gem_shmem_ops, &obj);
        if (ret)
                goto fail;

        lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

        ret = drm_gem_object_init(dev, obj, size);
        if (ret)
                goto fail;

        /*
         * Our buffers are kept pinned, so allocating them from the MOVABLE
         * zone is a really bad idea, and conflicts with CMA. See comments
         * above new_inode() why this is required _and_ expected if you're
         * going to pin these pages.
         */
        mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
                             __GFP_RETRY_MAYFAIL | __GFP_NOWARN);

        etnaviv_gem_obj_add(dev, obj);

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
fail:
        drm_gem_object_put_unlocked(obj);

        return ret;
}

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
        struct reservation_object *robj, const struct etnaviv_gem_ops *ops,
        struct etnaviv_gem_object **res)
{
        struct drm_gem_object *obj;
        int ret;

        ret = etnaviv_gem_new_impl(dev, size, flags, robj, ops, &obj);
        if (ret)
                return ret;

        drm_gem_private_object_init(dev, obj, size);

        *res = to_etnaviv_bo(obj);

        return 0;
}

static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct page **pvec = NULL;
        struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
        int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

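        /* get_user_pages_fast() may take mmap_sem; annotate this for lockdep. */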
        might_lock_read(&current->mm->mmap_sem);

        if (userptr->mm != current->mm)
                return -EPERM;

        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!pvec)
                return -ENOMEM;

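        /*
         * get_user_pages_fast() may pin fewer pages than requested, so keep
         * pinning in chunks until the whole range is covered, releasing
         * everything pinned so far if an iteration fails.
         */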
        do {
                unsigned num_pages = npages - pinned;
                uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
                struct page **pages = pvec + pinned;

                ret = get_user_pages_fast(ptr, num_pages,
                                          !userptr->ro ? FOLL_WRITE : 0, pages);
                if (ret < 0) {
                        release_pages(pvec, pinned);
                        kvfree(pvec);
                        return ret;
                }

                pinned += ret;

        } while (pinned < npages);

        etnaviv_obj->pages = pvec;

        return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
        }
        if (etnaviv_obj->pages) {
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

                release_pages(etnaviv_obj->pages, npages);
                kvfree(etnaviv_obj->pages);
        }
}

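/*
 * userptr objects are backed by the user's own memory mapping already,
 * so mapping them again through the GEM mmap offset is not supported.
 */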
static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
        .get_pages = etnaviv_gem_userptr_get_pages,
        .release = etnaviv_gem_userptr_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
        uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int ret;

        ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, NULL,
                                      &etnaviv_gem_userptr_ops, &etnaviv_obj);
        if (ret)
                return ret;

        lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

        etnaviv_obj->userptr.ptr = ptr;
        etnaviv_obj->userptr.mm = current->mm;
        etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

        etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

        ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put_unlocked(&etnaviv_obj->base);
        return ret;
}