// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <drm/drm_prime.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

static struct lock_class_key etnaviv_shm_lock_class;
static struct lock_class_key etnaviv_userptr_lock_class;

static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_map_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct sg_table *sgt = etnaviv_obj->sgt;

        /*
         * For non-cached buffers, ensure the new pages are clean
         * because display controller, GPU, etc. are not coherent:
         *
         * WARNING: The DMA API does not support concurrent CPU
         * and device access to the memory area.  With BIDIRECTIONAL,
         * we will clean the cache lines which overlap the region,
         * and invalidate all cache lines (partially) contained in
         * the region.
         *
         * If you have dirty data in the overlapping cache lines,
         * that will corrupt the GPU-written data.  If you have
         * written into the remainder of the region, this can
         * discard those writes.
         */
        if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
                dma_unmap_sgtable(dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
}

/* called with etnaviv_obj->lock held */
static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct drm_device *dev = etnaviv_obj->base.dev;
        struct page **p = drm_gem_get_pages(&etnaviv_obj->base);

        if (IS_ERR(p)) {
                dev_dbg(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
                return PTR_ERR(p);
        }

        etnaviv_obj->pages = p;

        return 0;
}

static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
                etnaviv_obj->sgt = NULL;
        }
        if (etnaviv_obj->pages) {
                drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
                                  true, false);

                etnaviv_obj->pages = NULL;
        }
}

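/*
 * Pin the object's backing pages and, on first use, build the
 * scatter/gather table used for DMA.  Must be called with
 * etnaviv_obj->lock held; the lockdep assertion below enforces that.
 */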
struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        if (!etnaviv_obj->pages) {
                ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
                if (ret < 0)
                        return ERR_PTR(ret);
        }

        if (!etnaviv_obj->sgt) {
                struct drm_device *dev = etnaviv_obj->base.dev;
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
                struct sg_table *sgt;

                sgt = drm_prime_pages_to_sg(etnaviv_obj->base.dev,
                                            etnaviv_obj->pages, npages);
                if (IS_ERR(sgt)) {
                        dev_err(dev->dev, "failed to allocate sgt: %ld\n",
                                PTR_ERR(sgt));
                        return ERR_CAST(sgt);
                }

                etnaviv_obj->sgt = sgt;

                etnaviv_gem_scatter_map(etnaviv_obj);
        }

        return etnaviv_obj->pages;
}

void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        lockdep_assert_held(&etnaviv_obj->lock);
        /* when we start tracking the pin count, then do something here */
}

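/*
 * Set up the CPU side of a mapping according to the BO's caching mode:
 * ETNA_BO_WC gets a write-combined mapping, ETNA_BO_UNCACHED an uncached
 * one, and cached objects are redirected to the GEM object's shmem file
 * so that they use its address_space.
 */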
static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        pgprot_t vm_page_prot;

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        vm_page_prot = vm_get_page_prot(vma->vm_flags);

        if (etnaviv_obj->flags & ETNA_BO_WC) {
                vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
        } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
                vma->vm_page_prot = pgprot_noncached(vm_page_prot);
        } else {
                /*
                 * Shunt off cached objs to shmem file so they have their own
                 * address_space (so unmap_mapping_range does what we want,
                 * in particular in the case of mmap'd dmabufs)
                 */
                fput(vma->vm_file);
                get_file(etnaviv_obj->base.filp);
                vma->vm_pgoff = 0;
                vma->vm_file  = etnaviv_obj->base.filp;

                vma->vm_page_prot = vm_page_prot;
        }

        return 0;
}

int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct etnaviv_gem_object *obj;
        int ret;

        ret = drm_gem_mmap(filp, vma);
        if (ret) {
                DBG("mmap failed: %d", ret);
                return ret;
        }

        obj = to_etnaviv_bo(vma->vm_private_data);
        return obj->ops->mmap(obj, vma);
}

static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct drm_gem_object *obj = vma->vm_private_data;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct page **pages, *page;
        pgoff_t pgoff;
        int err;

        /*
         * Make sure we don't parallel update on a fault, nor move or remove
         * something from beneath our feet.  Note that vmf_insert_page() is
         * specifically coded to take care of this, so we don't have to.
         */
        err = mutex_lock_interruptible(&etnaviv_obj->lock);
        if (err)
                return VM_FAULT_NOPAGE;
        /* make sure we have pages attached now */
        pages = etnaviv_gem_get_pages(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        if (IS_ERR(pages)) {
                err = PTR_ERR(pages);
                return vmf_error(err);
        }

        /* We don't use vmf->pgoff since that has the fake offset: */
        pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

        page = pages[pgoff];

        VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
             page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);

        return vmf_insert_page(vma, vmf->address, page);
}

int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
{
        int ret;

        /* Make it mmapable */
        ret = drm_gem_create_mmap_offset(obj);
        if (ret)
                dev_err(obj->dev->dev, "could not allocate mmap offset\n");
        else
                *offset = drm_vma_node_offset_addr(&obj->vma_node);

        return ret;
}

static struct etnaviv_vram_mapping *
etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
                             struct etnaviv_iommu_context *context)
{
        struct etnaviv_vram_mapping *mapping;

        list_for_each_entry(mapping, &obj->vram_list, obj_node) {
                if (mapping->context == context)
                        return mapping;
        }

        return NULL;
}

void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        mutex_lock(&etnaviv_obj->lock);
        WARN_ON(mapping->use == 0);
        mapping->use -= 1;
        mutex_unlock(&etnaviv_obj->lock);

        drm_gem_object_put(&etnaviv_obj->base);
}

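/*
 * Look up (or create) the mapping of this object into the given MMU
 * context and take a use reference on it.  On success a reference on
 * the GEM object is taken as well; both are dropped again by
 * etnaviv_gem_mapping_unreference().
 */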
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
        struct drm_gem_object *obj, struct etnaviv_iommu_context *mmu_context,
        u64 va)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_vram_mapping *mapping;
        struct page **pages;
        int ret = 0;

        mutex_lock(&etnaviv_obj->lock);
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
        if (mapping) {
                /*
                 * Holding the object lock prevents the use count changing
                 * beneath us.  If the use count is zero, the MMU might be
                 * reaping this object, so take the lock and re-check that
                 * the MMU owns this mapping to close this race.
                 */
                if (mapping->use == 0) {
                        mutex_lock(&mmu_context->lock);
                        if (mapping->context == mmu_context)
                                mapping->use += 1;
                        else
                                mapping = NULL;
                        mutex_unlock(&mmu_context->lock);
                        if (mapping)
                                goto out;
                } else {
                        mapping->use += 1;
                        goto out;
                }
        }

        pages = etnaviv_gem_get_pages(etnaviv_obj);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out;
        }

        /*
         * See if we have a reaped vram mapping we can re-use before
         * allocating a fresh mapping.
         */
        mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
        if (!mapping) {
                mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
                if (!mapping) {
                        ret = -ENOMEM;
                        goto out;
                }

                INIT_LIST_HEAD(&mapping->scan_node);
                mapping->object = etnaviv_obj;
        } else {
                list_del(&mapping->obj_node);
        }

        etnaviv_iommu_context_get(mmu_context);
        mapping->context = mmu_context;
        mapping->use = 1;

        ret = etnaviv_iommu_map_gem(mmu_context, etnaviv_obj,
                                    mmu_context->global->memory_base,
                                    mapping, va);
        if (ret < 0) {
                etnaviv_iommu_context_put(mmu_context);
                kfree(mapping);
        } else {
                list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
        }

out:
        mutex_unlock(&etnaviv_obj->lock);

        if (ret)
                return ERR_PTR(ret);

        /* Take a reference on the object */
        drm_gem_object_get(obj);
        return mapping;
}

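/*
 * Return a kernel virtual mapping of the object, creating it lazily on
 * first use.  The unlocked vaddr check is only an optimisation; the
 * decision to create the mapping is re-checked under etnaviv_obj->lock.
 * The mapping is only torn down when the object is released.
 */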
void *etnaviv_gem_vmap(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->vaddr)
                return etnaviv_obj->vaddr;

        mutex_lock(&etnaviv_obj->lock);
        /*
         * Need to check again, as we might have raced with another thread
         * while waiting for the mutex.
         */
        if (!etnaviv_obj->vaddr)
                etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);

        return etnaviv_obj->vaddr;
}

static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
{
        struct page **pages;

        lockdep_assert_held(&obj->lock);

        pages = etnaviv_gem_get_pages(obj);
        if (IS_ERR(pages))
                return NULL;

        return vmap(pages, obj->base.size >> PAGE_SHIFT,
                        VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}

static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
{
        if (op & ETNA_PREP_READ)
                return DMA_FROM_DEVICE;
        else if (op & ETNA_PREP_WRITE)
                return DMA_TO_DEVICE;
        else
                return DMA_BIDIRECTIONAL;
}

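/*
 * Prepare the object for CPU access: wait for outstanding fences
 * (unless ETNA_PREP_NOSYNC is set) and, for cached BOs, sync the pages
 * for the CPU in the direction implied by the prep op.  Userspace is
 * expected to pair each prep with a later etnaviv_gem_cpu_fini(), which
 * hands the pages back to the device.
 */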
int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
                struct drm_etnaviv_timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct drm_device *dev = obj->dev;
        bool write = !!(op & ETNA_PREP_WRITE);
        int ret;

        if (!etnaviv_obj->sgt) {
                void *ret;

                mutex_lock(&etnaviv_obj->lock);
                ret = etnaviv_gem_get_pages(etnaviv_obj);
                mutex_unlock(&etnaviv_obj->lock);
                if (IS_ERR(ret))
                        return PTR_ERR(ret);
        }

        if (op & ETNA_PREP_NOSYNC) {
                if (!dma_resv_test_signaled_rcu(obj->resv, write))
                        return -EBUSY;
        } else {
                unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

                ret = dma_resv_wait_timeout_rcu(obj->resv, write, true,
                                                remain);
                if (ret <= 0)
                        return ret == 0 ? -ETIMEDOUT : ret;
        }

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                dma_sync_sgtable_for_cpu(dev->dev, etnaviv_obj->sgt,
                                         etnaviv_op_to_dma_dir(op));
                etnaviv_obj->last_cpu_prep_op = op;
        }

        return 0;
}

int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
{
        struct drm_device *dev = obj->dev;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        if (etnaviv_obj->flags & ETNA_BO_CACHED) {
                /* fini without a prep is almost certainly a userspace error */
                WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
                dma_sync_sgtable_for_device(dev->dev, etnaviv_obj->sgt,
                        etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
                etnaviv_obj->last_cpu_prep_op = 0;
        }

        return 0;
}

int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
        struct drm_etnaviv_timespec *timeout)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
}

#ifdef CONFIG_DEBUG_FS
static void etnaviv_gem_describe_fence(struct dma_fence *fence,
        const char *type, struct seq_file *m)
{
        if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                seq_printf(m, "\t%9s: %s %s seq %llu\n",
                           type,
                           fence->ops->get_driver_name(fence),
                           fence->ops->get_timeline_name(fence),
                           fence->seqno);
}

static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct dma_resv *robj = obj->resv;
        struct dma_resv_list *fobj;
        struct dma_fence *fence;
        unsigned long off = drm_vma_node_start(&obj->vma_node);

        seq_printf(m, "%08x: %c %2d (%2d) %08lx %p %zd\n",
                        etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
                        obj->name, kref_read(&obj->refcount),
                        off, etnaviv_obj->vaddr, obj->size);

        rcu_read_lock();
        fobj = rcu_dereference(robj->fence);
        if (fobj) {
                unsigned int i, shared_count = fobj->shared_count;

                for (i = 0; i < shared_count; i++) {
                        fence = rcu_dereference(fobj->shared[i]);
                        etnaviv_gem_describe_fence(fence, "Shared", m);
                }
        }

        fence = rcu_dereference(robj->fence_excl);
        if (fence)
                etnaviv_gem_describe_fence(fence, "Exclusive", m);
        rcu_read_unlock();
}

void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
        struct seq_file *m)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int count = 0;
        size_t size = 0;

        mutex_lock(&priv->gem_lock);
        list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node) {
                struct drm_gem_object *obj = &etnaviv_obj->base;

                seq_puts(m, "   ");
                etnaviv_gem_describe(obj, m);
                count++;
                size += obj->size;
        }
        mutex_unlock(&priv->gem_lock);

        seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
{
        vunmap(etnaviv_obj->vaddr);
        put_pages(etnaviv_obj);
}

static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
        .get_pages = etnaviv_gem_shmem_get_pages,
        .release = etnaviv_gem_shmem_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_mmap_obj,
};

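/*
 * Final GEM object teardown: the object must be idle.  It is unhooked
 * from the per-device object list, any remaining (unused) VRAM mappings
 * are unmapped and freed, and the backing storage is released through
 * the per-type release op.
 */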
void etnaviv_gem_free_object(struct drm_gem_object *obj)
{
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_drm_private *priv = obj->dev->dev_private;
        struct etnaviv_vram_mapping *mapping, *tmp;

        /* object should not be active */
        WARN_ON(is_active(etnaviv_obj));

        mutex_lock(&priv->gem_lock);
        list_del(&etnaviv_obj->gem_node);
        mutex_unlock(&priv->gem_lock);

        list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
                                 obj_node) {
                struct etnaviv_iommu_context *context = mapping->context;

                WARN_ON(mapping->use);

                if (context) {
                        etnaviv_iommu_unmap_gem(context, mapping);
                        etnaviv_iommu_context_put(context);
                }

                list_del(&mapping->obj_node);
                kfree(mapping);
        }

        drm_gem_free_mmap_offset(obj);
        etnaviv_obj->ops->release(etnaviv_obj);
        drm_gem_object_release(obj);

        kfree(etnaviv_obj);
}

void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);

        mutex_lock(&priv->gem_lock);
        list_add_tail(&etnaviv_obj->gem_node, &priv->gem_list);
        mutex_unlock(&priv->gem_lock);
}

static const struct vm_operations_struct vm_ops = {
        .fault = etnaviv_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
        .free = etnaviv_gem_free_object,
        .pin = etnaviv_gem_prime_pin,
        .unpin = etnaviv_gem_prime_unpin,
        .get_sg_table = etnaviv_gem_prime_get_sg_table,
        .vmap = etnaviv_gem_prime_vmap,
        .vunmap = etnaviv_gem_prime_vunmap,
        .vm_ops = &vm_ops,
};

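/*
 * Common object setup: validate the caching flags (exactly one of
 * ETNA_BO_UNCACHED, ETNA_BO_CACHED or ETNA_BO_WC must be set), allocate
 * the etnaviv_gem_object and wire up the GEM object funcs.  The caller
 * is responsible for calling drm_gem_object_init() or
 * drm_gem_private_object_init() on the returned object.
 */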
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
        const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
        struct etnaviv_gem_object *etnaviv_obj;
        unsigned sz = sizeof(*etnaviv_obj);
        bool valid = true;

        /* validate flags */
        switch (flags & ETNA_BO_CACHE_MASK) {
        case ETNA_BO_UNCACHED:
        case ETNA_BO_CACHED:
        case ETNA_BO_WC:
                break;
        default:
                valid = false;
        }

        if (!valid) {
                dev_err(dev->dev, "invalid cache flag: %x\n",
                        (flags & ETNA_BO_CACHE_MASK));
                return -EINVAL;
        }

        etnaviv_obj = kzalloc(sz, GFP_KERNEL);
        if (!etnaviv_obj)
                return -ENOMEM;

        etnaviv_obj->flags = flags;
        etnaviv_obj->ops = ops;

        mutex_init(&etnaviv_obj->lock);
        INIT_LIST_HEAD(&etnaviv_obj->vram_list);

        *obj = &etnaviv_obj->base;
        (*obj)->funcs = &etnaviv_gem_object_funcs;

        return 0;
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
        u32 size, u32 flags, u32 *handle)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct drm_gem_object *obj = NULL;
        int ret;

        size = PAGE_ALIGN(size);

        ret = etnaviv_gem_new_impl(dev, size, flags,
                                   &etnaviv_gem_shmem_ops, &obj);
        if (ret)
                goto fail;

        lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);

        ret = drm_gem_object_init(dev, obj, size);
        if (ret)
                goto fail;

        /*
         * Our buffers are kept pinned, so allocating them from the MOVABLE
         * zone is a really bad idea, and conflicts with CMA. See comments
         * above new_inode() why this is required _and_ expected if you're
         * going to pin these pages.
         */
        mapping_set_gfp_mask(obj->filp->f_mapping, priv->shm_gfp_mask);

        etnaviv_gem_obj_add(dev, obj);

        ret = drm_gem_handle_create(file, obj, handle);

        /* drop reference from allocate - handle holds it now */
fail:
        drm_gem_object_put(obj);

        return ret;
}
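
/*
 * Typical caller: the DRM_IOCTL_ETNAVIV_GEM_NEW path in etnaviv_drv.c.
 * A rough sketch of that caller (not verbatim; see etnaviv_drv.c for
 * the authoritative version):
 *
 *      static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
 *                      struct drm_file *file)
 *      {
 *              struct drm_etnaviv_gem_new *args = data;
 *
 *              return etnaviv_gem_new_handle(dev, file, args->size,
 *                                            args->flags, &args->handle);
 *      }
 */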

int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
        const struct etnaviv_gem_ops *ops, struct etnaviv_gem_object **res)
{
        struct drm_gem_object *obj;
        int ret;

        ret = etnaviv_gem_new_impl(dev, size, flags, ops, &obj);
        if (ret)
                return ret;

        drm_gem_private_object_init(dev, obj, size);

        *res = to_etnaviv_bo(obj);

        return 0;
}

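/*
 * Pin the pages backing a userptr BO.  The object size is fixed at
 * creation time, so the whole range is pinned up front with
 * pin_user_pages_fast(), looping until every page is covered.  Only the
 * process that created the BO may trigger this (userptr->mm check).
 */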
static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
{
        struct page **pvec = NULL;
        struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
        int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;

        might_lock_read(&current->mm->mmap_lock);

        if (userptr->mm != current->mm)
                return -EPERM;

        pvec = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
        if (!pvec)
                return -ENOMEM;

        do {
                unsigned num_pages = npages - pinned;
                uint64_t ptr = userptr->ptr + pinned * PAGE_SIZE;
                struct page **pages = pvec + pinned;

                ret = pin_user_pages_fast(ptr, num_pages,
                                          !userptr->ro ? FOLL_WRITE : 0, pages);
                if (ret < 0) {
                        unpin_user_pages(pvec, pinned);
                        kvfree(pvec);
                        return ret;
                }

                pinned += ret;

        } while (pinned < npages);

        etnaviv_obj->pages = pvec;

        return 0;
}

static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
{
        if (etnaviv_obj->sgt) {
                etnaviv_gem_scatterlist_unmap(etnaviv_obj);
                sg_free_table(etnaviv_obj->sgt);
                kfree(etnaviv_obj->sgt);
        }
        if (etnaviv_obj->pages) {
                int npages = etnaviv_obj->base.size >> PAGE_SHIFT;

                unpin_user_pages(etnaviv_obj->pages, npages);
                kvfree(etnaviv_obj->pages);
        }
}

static int etnaviv_gem_userptr_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
                struct vm_area_struct *vma)
{
        return -EINVAL;
}

static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
        .get_pages = etnaviv_gem_userptr_get_pages,
        .release = etnaviv_gem_userptr_release,
        .vmap = etnaviv_gem_vmap_impl,
        .mmap = etnaviv_gem_userptr_mmap_obj,
};

int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
        uintptr_t ptr, u32 size, u32 flags, u32 *handle)
{
        struct etnaviv_gem_object *etnaviv_obj;
        int ret;

        ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED,
                                      &etnaviv_gem_userptr_ops, &etnaviv_obj);
        if (ret)
                return ret;

        lockdep_set_class(&etnaviv_obj->lock, &etnaviv_userptr_lock_class);

        etnaviv_obj->userptr.ptr = ptr;
        etnaviv_obj->userptr.mm = current->mm;
        etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);

        etnaviv_gem_obj_add(dev, &etnaviv_obj->base);

        ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(&etnaviv_obj->base);
        return ret;
}