// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>

#include <drm/drm_prime.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"

static void update_inactive(struct msm_gem_object *msm_obj);

static dma_addr_t physaddr(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) +
			priv->vram.paddr;
}

static bool use_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	return !msm_obj->vram_node;
}

/*
 * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 * API.  Really the GPU cache is out of scope here (handled on the
 * cmdstream) and all we need to do is invalidate newly allocated pages
 * before mapping them to the CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache that, depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tale of abstraction gone wrong.
 */
static void sync_for_device(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

static void sync_for_cpu(struct msm_gem_object *msm_obj)
{
	struct device *dev = msm_obj->base.dev->dev;

	dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
}

/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;
	dma_addr_t paddr;
	struct page **p;
	int ret, i;

	p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	spin_lock(&priv->vram.lock);
	ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages);
	spin_unlock(&priv->vram.lock);
	if (ret) {
		kvfree(p);
		return ERR_PTR(ret);
	}

	paddr = physaddr(obj);
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(paddr);
		paddr += PAGE_SIZE;
	}

	return p;
}
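
/*
 * Lazily attach backing pages (shmem, or the VRAM carveout when no
 * IOMMU is in use) and build the sg_table on first use.  The caller is
 * expected to hold the object lock; the GEM_WARN_ON below checks that
 * assumption.
 */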
static struct page **get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->pages) {
		struct drm_device *dev = obj->dev;
		struct page **p;
		int npages = obj->size >> PAGE_SHIFT;

		if (use_pages(obj))
			p = drm_gem_get_pages(obj);
		else
			p = get_pages_vram(obj, npages);

		if (IS_ERR(p)) {
			DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n",
					PTR_ERR(p));
			return p;
		}

		msm_obj->pages = p;

		msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
		if (IS_ERR(msm_obj->sgt)) {
			void *ptr = ERR_CAST(msm_obj->sgt);

			DRM_DEV_ERROR(dev->dev, "failed to allocate sgt\n");
			msm_obj->sgt = NULL;
			return ptr;
		}

		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			sync_for_device(msm_obj);

		GEM_WARN_ON(msm_obj->active_count);
		update_inactive(msm_obj);
	}

	return msm_obj->pages;
}

static void put_pages_vram(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	spin_lock(&priv->vram.lock);
	drm_mm_remove_node(msm_obj->vram_node);
	spin_unlock(&priv->vram.lock);

	kvfree(msm_obj->pages);
}

static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		if (msm_obj->sgt) {
			/* For non-cached buffers, ensure the new
			 * pages are clean because display controller,
			 * GPU, etc. are not coherent:
			 */
			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
				sync_for_cpu(msm_obj);

			sg_free_table(msm_obj->sgt);
			kfree(msm_obj->sgt);
			msm_obj->sgt = NULL;
		}

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else
			put_pages_vram(obj);

		msm_obj->pages = NULL;
	}
}
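
/*
 * Pin the object's backing pages, bumping pin_count.  Callers balance
 * this with msm_gem_put_pages().  An object whose madvise state is no
 * longer MSM_MADV_WILLNEED refuses to hand out pages, hence the madv
 * check below.
 */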
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **p;

	msm_gem_lock(obj);

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return ERR_PTR(-EBUSY);
	}

	p = get_pages(obj);
	if (!IS_ERR(p)) {
		msm_obj->pin_count++;
		update_inactive(msm_obj);
	}

	msm_gem_unlock(obj);
	return p;
}

void msm_gem_put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);
	msm_obj->pin_count--;
	GEM_WARN_ON(msm_obj->pin_count < 0);
	update_inactive(msm_obj);
	msm_gem_unlock(obj);
}
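
/*
 * Set up the vma flags and page protection for a userspace mapping:
 * WC and uncached objects get write-combine/non-cached PTEs, while
 * fully cached objects are redirected to the shmem file's own
 * address_space so that unmap_mapping_range() behaves as expected.
 */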
int msm_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (msm_obj->flags & MSM_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (msm_obj->flags & MSM_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		vma_set_file(vma, obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}

int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
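
/*
 * Page fault handler: make sure backing pages are attached, then insert
 * the faulting page's pfn as a mixed mapping.  Takes the object lock
 * interruptibly to serialize against purge/eviction.
 */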
static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	unsigned long pfn;
	pgoff_t pgoff;
	int err;
	vm_fault_t ret;

	/*
	 * vm_ops.open/drm_gem_mmap_obj and close get and put
	 * a reference on obj. So, we don't need to hold one here.
	 */
	err = msm_gem_lock_interruptible(obj);
	if (err) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
		msm_gem_unlock(obj);
		return VM_FAULT_SIGBUS;
	}

	/* make sure we have pages attached now */
	pages = get_pages(obj);
	if (IS_ERR(pages)) {
		ret = vmf_error(PTR_ERR(pages));
		goto out_unlock;
	}

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	pfn = page_to_pfn(pages[pgoff]);

	VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
			pfn, pfn << PAGE_SHIFT);

	ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));

out_unlock:
	msm_gem_unlock(obj);
out:
	return ret;
}

/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	/* Make it mmapable */
	ret = drm_gem_create_mmap_offset(obj);

	if (ret) {
		DRM_DEV_ERROR(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}

uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	msm_gem_lock(obj);
	offset = mmap_offset(obj);
	msm_gem_unlock(obj);

	return offset;
}
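
/*
 * Each object keeps a list of per-address-space VMAs.  add_vma() and
 * lookup_vma() manage that list; both expect the caller to hold the
 * object lock.
 */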
static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (!vma)
		return ERR_PTR(-ENOMEM);

	vma->aspace = aspace;

	list_add_tail(&vma->list, &msm_obj->vmas);

	return vma;
}

static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace == aspace)
			return vma;
	}

	return NULL;
}

static void del_vma(struct msm_gem_vma *vma)
{
	if (!vma)
		return;

	list_del(&vma->list);
	kfree(vma);
}

/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping.  In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
static void
put_iova_spaces(struct drm_gem_object *obj, bool close)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry(vma, &msm_obj->vmas, list) {
		if (vma->aspace) {
			msm_gem_purge_vma(vma->aspace, vma);
			if (close)
				msm_gem_close_vma(vma->aspace, vma);
		}
	}
}

/* Called with msm_obj locked */
static void
put_iova_vmas(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma, *tmp;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
		del_vma(vma);
	}
}
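
/*
 * Look up (or lazily create) the VMA for the given address space and
 * allocate an iova for it within [range_start, range_end).  This does
 * not pin or map anything; that is msm_gem_pin_iova()'s job.
 */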
static int get_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	struct msm_gem_vma *vma;
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!vma) {
		vma = add_vma(obj, aspace);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
			range_start, range_end);
		if (ret) {
			del_vma(vma);
			return ret;
		}
	}

	*iova = vma->iova;
	return 0;
}

static int msm_gem_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;
	struct page **pages;
	int ret, prot = IOMMU_READ;

	if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
		prot |= IOMMU_WRITE;

	if (msm_obj->flags & MSM_BO_MAP_PRIV)
		prot |= IOMMU_PRIV;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
		return -EBUSY;

	vma = lookup_vma(obj, aspace);
	if (GEM_WARN_ON(!vma))
		return -EINVAL;

	pages = get_pages(obj);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = msm_gem_map_vma(aspace, vma, prot,
			msm_obj->sgt, obj->size >> PAGE_SHIFT);

	if (!ret)
		msm_obj->pin_count++;

	return ret;
}

static int get_and_pin_iova_range_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	u64 local;
	int ret;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	ret = get_iova_locked(obj, aspace, &local,
		range_start, range_end);

	if (!ret)
		ret = msm_gem_pin_iova(obj, aspace);

	if (!ret)
		*iova = local;

	return ret;
}

/*
 * Get the iova and pin it.  Should have a matching put.
 * Limits the iova to the specified range (in pages).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova,
		u64 range_start, u64 range_end)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_and_pin_iova_range_locked(obj, aspace, iova, range_start, range_end);
	msm_gem_unlock(obj);

	return ret;
}

int msm_gem_get_and_pin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return get_and_pin_iova_range_locked(obj, aspace, iova, 0, U64_MAX);
}

/* get iova and pin it. Should have a matching put */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
}

/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object:
 */
int msm_gem_get_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace, uint64_t *iova)
{
	int ret;

	msm_gem_lock(obj);
	ret = get_iova_locked(obj, aspace, iova, 0, U64_MAX);
	msm_gem_unlock(obj);

	return ret;
}

/* get iova without taking a reference, used in places where you have
 * already done a 'msm_gem_get_and_pin_iova' or 'msm_gem_get_iova'
 */
uint64_t msm_gem_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_vma *vma;

	msm_gem_lock(obj);
	vma = lookup_vma(obj, aspace);
	msm_gem_unlock(obj);
	GEM_WARN_ON(!vma);

	return vma ? vma->iova : 0;
}

/*
 * Locked variant of msm_gem_unpin_iova()
 */
void msm_gem_unpin_iova_locked(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_gem_vma *vma;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	vma = lookup_vma(obj, aspace);

	if (!GEM_WARN_ON(!vma)) {
		msm_gem_unmap_vma(aspace, vma);

		msm_obj->pin_count--;
		GEM_WARN_ON(msm_obj->pin_count < 0);

		update_inactive(msm_obj);
	}
}

/*
 * Unpin an iova by updating the reference counts. The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * it is no longer needed.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj,
		struct msm_gem_address_space *aspace)
{
	msm_gem_lock(obj);
	msm_gem_unpin_iova_locked(obj, aspace);
	msm_gem_unlock(obj);
}

int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	args->pitch = align_pitch(args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);
	return msm_gem_new_handle(dev, file, args->size,
			MSM_BO_SCANOUT | MSM_BO_WC, &args->handle, "dumb");
}

int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = msm_gem_mmap_offset(obj);

	drm_gem_object_put(obj);

fail:
	return ret;
}
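
/*
 * Map the object into the kernel's address space with vmap(), bumping
 * vmap_count first so the shrinker won't vunmap it out from under us.
 * Imported dma-bufs are rejected here; their kernel mappings come from
 * the exporter instead.
 */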
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int ret = 0;

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (obj->import_attach)
		return ERR_PTR(-ENODEV);

	if (GEM_WARN_ON(msm_obj->madv > madv)) {
		DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
			msm_obj->madv, madv);
		return ERR_PTR(-EBUSY);
	}

	/* increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold the msm_obj lock).
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		struct page **pages = get_pages(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}

		update_inactive(msm_obj);
	}

	return msm_obj->vaddr;

fail:
	msm_obj->vmap_count--;
	return ERR_PTR(ret);
}

void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
{
	return get_vaddr(obj, MSM_MADV_WILLNEED);
}

void *msm_gem_get_vaddr(struct drm_gem_object *obj)
{
	void *ret;

	msm_gem_lock(obj);
	ret = msm_gem_get_vaddr_locked(obj);
	msm_gem_unlock(obj);

	return ret;
}

/*
 * Don't use this!  It is for the very special case of dumping
 * submits from GPU hangs or faults, where the bo may already
 * be MSM_MADV_DONTNEED, but we know the buffer is still on the
 * ring.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	return get_vaddr(obj, __MSM_MADV_PURGED);
}

void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->vmap_count < 1);

	msm_obj->vmap_count--;
}

void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
	msm_gem_lock(obj);
	msm_gem_put_vaddr_locked(obj);
	msm_gem_unlock(obj);
}

/* Update madvise status, returns true if not purged, else
 * returns false.
 */
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	msm_gem_lock(obj);

	if (msm_obj->madv != __MSM_MADV_PURGED)
		msm_obj->madv = madv;

	madv = msm_obj->madv;

	/* If the obj is inactive, we might need to move it
	 * between inactive lists
	 */
	if (msm_obj->active_count == 0)
		update_inactive(msm_obj);

	msm_gem_unlock(obj);

	return (madv != __MSM_MADV_PURGED);
}
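
/*
 * Release everything backing a purgeable object: iommu mappings, the
 * kernel vmap, the mmap offset, and finally the shmem pages themselves,
 * truncated so the memory goes back to the system immediately.
 */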
void msm_gem_purge(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(!is_purgeable(msm_obj));

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, true);

	msm_gem_vunmap(obj);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	put_iova_vmas(obj);

	msm_obj->madv = __MSM_MADV_PURGED;
	update_inactive(msm_obj);

	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * possible back to the system, as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
			0, (loff_t)-1);
}

/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(is_unevictable(msm_obj));
	GEM_WARN_ON(!msm_obj->evictable);
	GEM_WARN_ON(msm_obj->active_count);

	/* Get rid of any iommu mapping(s): */
	put_iova_spaces(obj, false);

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

	put_pages(obj);

	update_inactive(msm_obj);
}

void msm_gem_vunmap(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (!msm_obj->vaddr || GEM_WARN_ON(!is_vunmapable(msm_obj)))
		return;

	vunmap(msm_obj->vaddr);
	msm_obj->vaddr = NULL;
}
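
/*
 * Wait for fences from other contexts on the object's reservation
 * before handing it to the GPU.  Fences from our own fence context are
 * skipped, since the ring itself is FIFO ordered.
 */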
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
		struct msm_fence_context *fctx, bool exclusive)
{
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int i, ret;

	fobj = dma_resv_get_list(obj->resv);
	if (!fobj || (fobj->shared_count == 0)) {
		fence = dma_resv_get_excl(obj->resv);
		/* don't need to wait on our own fences, since ring is fifo */
		if (fence && (fence->context != fctx->context)) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	if (!exclusive || !fobj)
		return 0;

	for (i = 0; i < fobj->shared_count; i++) {
		fence = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj->resv));
		if (fence->context != fctx->context) {
			ret = dma_fence_wait(fence, true);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct msm_drm_private *priv = obj->dev->dev_private;

	GEM_WARN_ON(!msm_gem_is_locked(obj));
	GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
	GEM_WARN_ON(msm_obj->dontneed);
	GEM_WARN_ON(!msm_obj->sgt);

	if (msm_obj->active_count++ == 0) {
		mutex_lock(&priv->mm_lock);
		if (msm_obj->evictable)
			mark_unevictable(msm_obj);
		list_del(&msm_obj->mm_list);
		list_add_tail(&msm_obj->mm_list, &gpu->active_list);
		mutex_unlock(&priv->mm_lock);
	}
}

void msm_gem_active_put(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	GEM_WARN_ON(!msm_gem_is_locked(obj));

	if (--msm_obj->active_count == 0) {
		update_inactive(msm_obj);
	}
}
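
/*
 * Re-file an inactive object onto the list matching its current madvise
 * state: willneed (evictable), dontneed (purgeable), or unpinned/purged.
 * Expects the object lock, and takes mm_lock to touch the lists.
 */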
static void update_inactive(struct msm_gem_object *msm_obj)
{
	struct msm_drm_private *priv = msm_obj->base.dev->dev_private;

	GEM_WARN_ON(!msm_gem_is_locked(&msm_obj->base));

	if (msm_obj->active_count != 0)
		return;

	mutex_lock(&priv->mm_lock);

	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	if (msm_obj->evictable)
		mark_unevictable(msm_obj);

	list_del(&msm_obj->mm_list);
	if ((msm_obj->madv == MSM_MADV_WILLNEED) && msm_obj->sgt) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_willneed);
		mark_evictable(msm_obj);
	} else if (msm_obj->madv == MSM_MADV_DONTNEED) {
		list_add_tail(&msm_obj->mm_list, &priv->inactive_dontneed);
		mark_purgeable(msm_obj);
	} else {
		GEM_WARN_ON((msm_obj->madv != __MSM_MADV_PURGED) && msm_obj->sgt);
		list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	}

	mutex_unlock(&priv->mm_lock);
}
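
/*
 * CPU access prep/fini: wait (with an optional timeout) for GPU access
 * to complete before the CPU reads or writes the buffer.  Cache
 * maintenance is still a TODO here.
 */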
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
	bool write = !!(op & MSM_PREP_WRITE);
	unsigned long remain =
		op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
	long ret;

	ret = dma_resv_wait_timeout_rcu(obj->resv, write,
						  true, remain);
	if (ret == 0)
		return remain == 0 ? -EBUSY : -ETIMEDOUT;
	else if (ret < 0)
		return ret;

	/* TODO cache maintenance */

	return 0;
}

int msm_gem_cpu_fini(struct drm_gem_object *obj)
{
	/* TODO cache maintenance */
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void describe_fence(struct dma_fence *fence, const char *type,
		struct seq_file *m)
{
	if (!dma_fence_is_signaled(fence))
		seq_printf(m, "\t%9s: %s %s seq %llu\n", type,
				fence->ops->get_driver_name(fence),
				fence->ops->get_timeline_name(fence),
				fence->seqno);
}

void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
		struct msm_gem_stats *stats)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct dma_resv *robj = obj->resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	struct msm_gem_vma *vma;
	uint64_t off = drm_vma_node_start(&obj->vma_node);
	const char *madv;

	msm_gem_lock(obj);

	stats->all.count++;
	stats->all.size += obj->size;

	if (is_active(msm_obj)) {
		stats->active.count++;
		stats->active.size += obj->size;
	}

	if (msm_obj->pages) {
		stats->resident.count++;
		stats->resident.size += obj->size;
	}

	switch (msm_obj->madv) {
	case __MSM_MADV_PURGED:
		stats->purged.count++;
		stats->purged.size += obj->size;
		madv = " purged";
		break;
	case MSM_MADV_DONTNEED:
		stats->purgeable.count++;
		stats->purgeable.size += obj->size;
		madv = " purgeable";
		break;
	case MSM_MADV_WILLNEED:
	default:
		madv = "";
		break;
	}

	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p",
			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
			obj->name, kref_read(&obj->refcount),
			off, msm_obj->vaddr);

	seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name);

	if (!list_empty(&msm_obj->vmas)) {

		seq_puts(m, "      vmas:");

		list_for_each_entry(vma, &msm_obj->vmas, list) {
			const char *name, *comm;

			if (vma->aspace) {
				struct msm_gem_address_space *aspace = vma->aspace;
				struct task_struct *task =
					get_pid_task(aspace->pid, PIDTYPE_PID);
				if (task) {
					comm = kstrdup(task->comm, GFP_KERNEL);
					put_task_struct(task);
				} else {
					comm = NULL;
				}
				name = aspace->name;
			} else {
				name = comm = NULL;
			}
			seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
				name, comm ? ":" : "", comm ? comm : "",
				vma->aspace, vma->iova,
				vma->mapped ? "mapped" : "unmapped",
				vma->inuse);
			kfree(comm);
		}

		seq_puts(m, "\n");
	}

	rcu_read_lock();
	fobj = rcu_dereference(robj->fence);
	if (fobj) {
		unsigned int i, shared_count = fobj->shared_count;

		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			describe_fence(fence, "Shared", m);
		}
	}

	fence = rcu_dereference(robj->fence_excl);
	if (fence)
		describe_fence(fence, "Exclusive", m);
	rcu_read_unlock();

	msm_gem_unlock(obj);
}

void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct msm_gem_stats stats = {};
	struct msm_gem_object *msm_obj;

	seq_puts(m, "   flags       id ref  offset   kaddr            size     madv      name\n");
	list_for_each_entry(msm_obj, list, node) {
		struct drm_gem_object *obj = &msm_obj->base;

		seq_puts(m, "   ");
		msm_gem_describe(obj, m, &stats);
	}

	seq_printf(m, "Total:     %4d objects, %9zu bytes\n",
			stats.all.count, stats.all.size);
	seq_printf(m, "Active:    %4d objects, %9zu bytes\n",
			stats.active.count, stats.active.size);
	seq_printf(m, "Resident:  %4d objects, %9zu bytes\n",
			stats.resident.count, stats.resident.size);
	seq_printf(m, "Purgeable: %4d objects, %9zu bytes\n",
			stats.purgeable.count, stats.purgeable.size);
	seq_printf(m, "Purged:    %4d objects, %9zu bytes\n",
			stats.purged.count, stats.purged.size);
}
#endif

/* don't call directly!  Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct drm_device *dev = obj->dev;
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&priv->obj_lock);
	list_del(&msm_obj->node);
	mutex_unlock(&priv->obj_lock);

	mutex_lock(&priv->mm_lock);
	if (msm_obj->dontneed)
		mark_unpurgeable(msm_obj);
	list_del(&msm_obj->mm_list);
	mutex_unlock(&priv->mm_lock);

	msm_gem_lock(obj);

	/* object should not be on active list: */
	GEM_WARN_ON(is_active(msm_obj));

	put_iova_spaces(obj, true);

	if (obj->import_attach) {
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

		put_iova_vmas(obj);

		/* dma_buf_detach() grabs resv lock, so we need to unlock
		 * prior to drm_prime_gem_destroy
		 */
		msm_gem_unlock(obj);

		drm_prime_gem_destroy(obj, msm_obj->sgt);
	} else {
		msm_gem_vunmap(obj);
		put_pages(obj);
		put_iova_vmas(obj);
		msm_gem_unlock(obj);
	}

	drm_gem_object_release(obj);

	kfree(msm_obj);
}

/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle,
		char *name)
{
	struct drm_gem_object *obj;
	int ret;

	obj = msm_gem_new(dev, size, flags);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (name)
		msm_gem_object_set_name(obj, "%s", name);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(obj);

	return ret;
}

static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs msm_gem_object_funcs = {
	.free = msm_gem_free_object,
	.pin = msm_gem_prime_pin,
	.unpin = msm_gem_prime_unpin,
	.get_sg_table = msm_gem_prime_get_sg_table,
	.vmap = msm_gem_prime_vmap,
	.vunmap = msm_gem_prime_vunmap,
	.vm_ops = &vm_ops,
};

static int msm_gem_new_impl(struct drm_device *dev,
		uint32_t size, uint32_t flags,
		struct drm_gem_object **obj)
{
	struct msm_gem_object *msm_obj;

	switch (flags & MSM_BO_CACHE_MASK) {
	case MSM_BO_UNCACHED:
	case MSM_BO_CACHED:
	case MSM_BO_WC:
		break;
	default:
		DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
				(flags & MSM_BO_CACHE_MASK));
		return -EINVAL;
	}

	msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
	if (!msm_obj)
		return -ENOMEM;

	msm_obj->flags = flags;
	msm_obj->madv = MSM_MADV_WILLNEED;

	INIT_LIST_HEAD(&msm_obj->submit_entry);
	INIT_LIST_HEAD(&msm_obj->vmas);

	*obj = &msm_obj->base;
	(*obj)->funcs = &msm_gem_object_funcs;

	return 0;
}

static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;
	bool use_vram = false;
	int ret;

	size = PAGE_ALIGN(size);

	if (!msm_use_mmu(dev))
		use_vram = true;
	else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size)
		use_vram = true;

	if (GEM_WARN_ON(use_vram && !priv->vram.size))
		return ERR_PTR(-EINVAL);

	/* Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, size, flags, &obj);
	if (ret)
		goto fail;

	msm_obj = to_msm_bo(obj);

	if (use_vram) {
		struct msm_gem_vma *vma;
		struct page **pages;

		drm_gem_private_object_init(dev, obj, size);

		msm_gem_lock(obj);
		vma = add_vma(obj, NULL);
		msm_gem_unlock(obj);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
			goto fail;
		}

		to_msm_bo(obj)->vram_node = &vma->node;

		/* Call chain get_pages() -> update_inactive() tries to
		 * access msm_obj->mm_list, but it is not initialized yet.
		 * To avoid a NULL pointer dereference, initialize
		 * mm_list to be empty.
		 */
		INIT_LIST_HEAD(&msm_obj->mm_list);

		msm_gem_lock(obj);
		pages = get_pages(obj);
		msm_gem_unlock(obj);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto fail;
		}

		vma->iova = physaddr(obj);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;
		/*
		 * Our buffers are kept pinned, so allocating them from the
		 * MOVABLE zone is a really bad idea, and conflicts with CMA.
		 * See the comments above new_inode() for why this is required
		 * _and_ expected if you're going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
	}

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	if (struct_mutex_locked) {
		drm_gem_object_put_locked(obj);
	} else {
		drm_gem_object_put(obj);
	}
	return ERR_PTR(ret);
}

struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, true);
}

struct drm_gem_object *msm_gem_new(struct drm_device *dev,
		uint32_t size, uint32_t flags)
{
	return _msm_gem_new(dev, size, flags, false);
}
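
/*
 * Wrap an imported dma-buf's sg_table in a GEM object.  Only supported
 * when an IOMMU is present; the pages array is filled from the sg_table
 * rather than allocated from shmem.
 */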
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!msm_use_mmu(dev)) {
		DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_gem_lock(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!msm_obj->pages) {
		msm_gem_unlock(obj);
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages);
	if (ret) {
		msm_gem_unlock(obj);
		goto fail;
	}

	msm_gem_unlock(obj);

	mutex_lock(&priv->mm_lock);
	list_add_tail(&msm_obj->mm_list, &priv->inactive_unpinned);
	mutex_unlock(&priv->mm_lock);

	mutex_lock(&priv->obj_lock);
	list_add_tail(&msm_obj->node, &priv->objects);
	mutex_unlock(&priv->obj_lock);

	return obj;

fail:
	drm_gem_object_put(obj);
	return ERR_PTR(ret);
}

static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova, bool locked)
{
	void *vaddr;
	struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
	int ret;

	if (IS_ERR(obj))
		return ERR_CAST(obj);

	if (iova) {
		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
		if (ret)
			goto err;
	}

	vaddr = msm_gem_get_vaddr(obj);
	if (IS_ERR(vaddr)) {
		msm_gem_unpin_iova(obj, aspace);
		ret = PTR_ERR(vaddr);
		goto err;
	}

	if (bo)
		*bo = obj;

	return vaddr;

err:
	if (locked)
		drm_gem_object_put_locked(obj);
	else
		drm_gem_object_put(obj);

	return ERR_PTR(ret);
}

void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
}

void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
		uint32_t flags, struct msm_gem_address_space *aspace,
		struct drm_gem_object **bo, uint64_t *iova)
{
	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
}

void msm_gem_kernel_put(struct drm_gem_object *bo,
		struct msm_gem_address_space *aspace, bool locked)
{
	if (IS_ERR_OR_NULL(bo))
		return;

	msm_gem_put_vaddr(bo);
	msm_gem_unpin_iova(bo, aspace);

	if (locked)
		drm_gem_object_put_locked(bo);
	else
		drm_gem_object_put(bo);
}

void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
{
	struct msm_gem_object *msm_obj = to_msm_bo(bo);
	va_list ap;

	if (!fmt)
		return;

	va_start(ap, fmt);
	vsnprintf(msm_obj->name, sizeof(msm_obj->name), fmt, ap);
	va_end(ap);
}