// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void msm_gem_vunmap_locked(struct drm_gem_object *obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API.  Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */

static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

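/*
 * Illustrative sketch (editor's note, not called anywhere; it just mirrors
 * how get_pages()/put_pages() below use the two helpers): WC/uncached
 * objects get cleaned once when their pages are allocated, and "unmapped"
 * again before the pages are released:
 *
 *	if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
 *		sync_for_device(msm_obj);	// right after allocation
 *	...
 *	if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
 *		sync_for_cpu(msm_obj);		// right before free
 */
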
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}

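/*
 * Worked example of the carveout math (editor's sketch; the numbers are
 * made up): vram_node->start is in units of pages, so for a node starting
 * at page 3, a carveout base priv->vram.paddr of 0x80000000, and 4K pages:
 *
 *	physaddr(obj) = (3 << PAGE_SHIFT) + 0x80000000 = 0x80003000
 *
 * and the loop above fills p[] with one struct page per PAGE_SIZE step
 * from that physical address.
 */
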
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}

struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	mutex_unlock(&msm_obj->lock);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	/* when we start tracking the pin count, then do something here */
}

int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma->vm_pgoff = 0;
		vma_set_file(vma, obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

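/*
 * Summary of the pgprot selection above (editor's sketch):
 *
 *	MSM_BO_WC        -> pgprot_writecombine()
 *	MSM_BO_UNCACHED  -> pgprot_noncached()
 *	cached (default) -> vm_get_page_prot(), with the vma re-pointed at
 *	                    the object's own shmem file so that
 *	                    unmap_mapping_range() works per-object
 */
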
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}

static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = mutex_lock_interruptible(&msm_obj->lock);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		mutex_unlock(&msm_obj->lock);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
	mutex_unlock(&msm_obj->lock);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	offset = mmap_offset(obj);
	mutex_unlock(&msm_obj->lock);
	return offset;
}

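/*
 * Usage sketch (editor's note; 'obj' stands for any msm GEM object): the
 * returned value is the fake offset userspace passes to mmap() on the drm
 * fd, which routes the mapping into msm_gem_fault() above:
 *
 *	uint64_t off = msm_gem_mmap_offset(obj);
 *	// userspace side:
 *	//   mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, drm_fd, off);
 */
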
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);

	kfree(vma);
}

/* Called with msm_obj->lock locked */
static void
put_iova(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			msm_gem_close_vma(vma->aspace, vma);
		}
		del_vma(vma);
	}
}

static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	int ret = 0;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	return msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);
}

/*
 * Get iova and pin it. Should have a matching put.
 * Limits iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	u64 local;
	int ret;

	mutex_lock(&msm_obj->lock);

	ret = msm_gem_get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	mutex_unlock(&msm_obj->lock);
	return ret;
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

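/*
 * Typical pairing (editor's sketch; 'gpu->aspace' stands in for whichever
 * address space the caller maps into):
 *
 *	uint64_t iova;
 *	int ret = msm_gem_get_and_pin_iova(obj, gpu->aspace, &iova);
 *	if (!ret) {
 *		// ... hand iova to the hardware ...
 *		msm_gem_unpin_iova(obj, gpu->aspace);
 *	}
 */
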
/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object.
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret;

	mutex_lock(&msm_obj->lock);
	ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	mutex_unlock(&msm_obj->lock);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);
	mutex_unlock(&msm_obj->lock);
	WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	mutex_lock(&msm_obj->lock);
	vma = lookup_vma(obj, aspace);

	if (!WARN_ON(!vma))
		msm_gem_unmap_vma(aspace, vma);

	mutex_unlock(&msm_obj->lock);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size  = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

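/*
 * Worked example (editor's sketch, assuming the align_pitch() helper in
 * msm_drv.h rounds the width up to a 32-pixel boundary): a 1920x1080,
 * 32bpp dumb buffer gets
 *
 *	pitch = 1920 * 4 = 7680 bytes
 *	size  = PAGE_ALIGN(7680 * 1080) = 8294400 bytes (2025 pages)
 */
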
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}

static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	mutex_lock(&msm_obj->lock);

	if (WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		mutex_unlock(&msm_obj->lock);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj->lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj->lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	mutex_unlock(&msm_obj->lock);
	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);
	WARN_ON(msm_obj->vmap_count < 1);
	msm_obj->vmap_count--;
	mutex_unlock(&msm_obj->lock);
}

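/*
 * Kernel vmap usage sketch (editor's note): every successful get must be
 * balanced by a put, since the shrinker only vunmaps objects whose
 * vmap_count has dropped to zero (is_vunmapable()):
 *
 *	void *ptr = msm_gem_get_vaddr(obj);
 *	if (!IS_ERR(ptr)) {
 *		memcpy(ptr, data, len);
 *		msm_gem_put_vaddr(obj);
 *	}
 */
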
/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock(&msm_obj->lock);

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	mutex_unlock(&msm_obj->lock);

	return (madv != __MSM_MADV_PURGED);
}

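/*
 * Return-value semantics (editor's sketch): marking a resident object
 * MSM_MADV_DONTNEED returns true (still resident, now purgeable by the
 * shrinker); once msm_gem_purge() has run, any madvise call returns false
 * and the caller must assume the contents are gone.
 */
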
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
	WARN_ON(!is_purgeable(msm_obj));
	WARN_ON(obj->import_attach);

	mutex_lock_nested(&msm_obj->lock, subclass);

	put_iova(obj);

	msm_gem_vunmap_locked(obj);

	put_pages(obj);

	msm_obj->madv = __MSM_MADV_PURGED;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);

	mutex_unlock(&msm_obj->lock);
}

static void msm_gem_vunmap_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	WARN_ON(!mutex_is_locked(&msm_obj->lock));

	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}

void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	mutex_lock_nested(&msm_obj->lock, subclass);
	msm_gem_vunmap_locked(obj);
	mutex_unlock(&msm_obj->lock);
}

/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);

	if (!atomic_fetch_inc(&msm_obj->active_count)) {
		msm_obj->gpu = gpu;
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

	if (!atomic_dec_return(&msm_obj->active_count)) {
		msm_obj->gpu = NULL;
		list_del_init(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	}
}

int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write, true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

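/*
 * Usage sketch (editor's note): a blocking prep for CPU writes, followed
 * by fini when the CPU is done with the buffer:
 *
 *	ret = msm_gem_cpu_prep(obj, MSM_PREP_WRITE, &timeout);
 *	// ... CPU access ...
 *	msm_gem_cpu_fini(obj);
 *
 * With MSM_PREP_NOSYNC the wait degenerates to a poll, and -EBUSY means
 * "still busy, try again".
 */
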
#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	mutex_lock(&msm_obj->lock);

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;
			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	mutex_unlock(&msm_obj->lock);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_object *msm_obj;
	int count = 0;
	size_t size = 0;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, mm_list) {
		struct drm_gem_object *obj = &msm_obj->base;
		seq_puts(m, "   ");
		msm_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif

/* don't call directly!  Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (llist_add(&msm_obj->freed, &priv->free_list))
		queue_work(priv->wq, &priv->free_work);
}

static void free_object(struct msm_gem_object *msm_obj)
{
	struct drm_gem_object *obj = &msm_obj->base;
	struct drm_device *dev = obj->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* object should not be on active list: */
	WARN_ON(is_active(msm_obj));

	list_del(&msm_obj->mm_list);

	mutex_lock(&msm_obj->lock);

	put_iova(obj);

	if (obj->import_attach) {
		WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		if (msm_obj->pages)
			kvfree(msm_obj->pages);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap_locked(obj);
		put_pages(obj);
	}

	drm_gem_object_release(obj);

	mutex_unlock(&msm_obj->lock);
	kfree(msm_obj);
}

void msm_gem_free_work(struct work_struct *work)
{
	struct msm_drm_private *priv =
		container_of(work, struct msm_drm_private, free_work);
	struct drm_device *dev = priv->dev;
	struct llist_node *freed;
	struct msm_gem_object *msm_obj, *next;

	while ((freed = llist_del_all(&priv->free_list))) {

		mutex_lock(&dev->struct_mutex);

		llist_for_each_entry_safe(msm_obj, next,
					  freed, freed)
			free_object(msm_obj);

		mutex_unlock(&dev->struct_mutex);

		if (need_resched())
			break;
	}
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

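/*
 * Usage sketch (editor's note): this is the path the dumb-buffer ioctl
 * above goes through; the handle is what userspace sees, and the kernel
 * reference from the allocation is dropped once the handle exists:
 *
 *	uint32_t handle;
 *	ret = msm_gem_new_handle(dev, file, size, MSM_BO_WC, &handle, "fb");
 */
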
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	mutex_init(&msm_obj->lock);

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		mutex_lock(&msm_obj->lock);

		vma = add_vma(obj, NULL);
		mutex_unlock(&msm_obj->lock);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		drm_gem_private_object_init(dev, obj, size);

		pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See comments above new_inode() why this is required _and_
		 * expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	if (struct_mutex_locked) {
		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	} else {
		mutex_lock(&dev->struct_mutex);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
		mutex_unlock(&dev->struct_mutex);
	}

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}

struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	mutex_lock(&msm_obj->lock);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		mutex_unlock(&msm_obj->lock);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret) {
		mutex_unlock(&msm_obj->lock);
		goto fail;
	}

	mutex_unlock(&msm_obj->lock);

	mutex_lock(&dev->struct_mutex);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
	mutex_unlock(&dev->struct_mutex);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;
err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}

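/*
 * Pairing sketch (editor's note; 'aspace' is whatever address space was
 * used at allocation): msm_gem_kernel_put() undoes all three steps of
 * _msm_gem_kernel_new(), namely the vmap, the iova pin, and the object
 * reference:
 *
 *	uint64_t iova;
 *	struct drm_gem_object *bo;
 *	void *ptr = msm_gem_kernel_new(dev, size, MSM_BO_WC,
 *				       aspace, &bo, &iova);
 *	if (!IS_ERR(ptr)) {
 *		// ... use ptr / iova ...
 *		msm_gem_kernel_put(bo, aspace, false);
 *	}
 */
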
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}