// SPDX-License-Identifier: GPL-2.0-only
/*
 * NVIDIA Tegra DRM GEM helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 * Copyright (C) 2013-2015 NVIDIA CORPORATION, All rights reserved.
 *
 * Based on the GEM/CMA helpers
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/tegra_drm.h>

#include "drm.h"
#include "gem.h"
static void tegra_bo_put(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_put(&obj->gem);
}
/* XXX move this into lib/scatterlist.c? */
static int sg_alloc_table_from_sg(struct sg_table *sgt, struct scatterlist *sg,
				  unsigned int nents, gfp_t gfp_mask)
{
	struct scatterlist *dst;
	unsigned int i;
	int err;

	err = sg_alloc_table(sgt, nents, gfp_mask);
	if (err < 0)
		return err;

	dst = sgt->sgl;

	for (i = 0; i < nents; i++) {
		sg_set_page(dst, sg_page(sg), sg->length, 0);
		dst = sg_next(dst);
		sg = sg_next(sg);
	}

	return 0;
}
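/*
 * host1x calls ->pin() to obtain a DMA address for a buffer before work
 * referencing it is submitted. Depending on how the buffer was allocated or
 * imported, this either reuses an existing mapping (returned through @phys)
 * or hands back an SG table for host1x to map via the DMA API.
 */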
static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo,
				     dma_addr_t *phys)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct sg_table *sgt;
	int err;

	/*
	 * If we've manually mapped the buffer object through the IOMMU, make
	 * sure to return the IOVA address of our mapping.
	 *
	 * Similarly, for buffers that have been allocated by the DMA API the
	 * physical address can be used for devices that are not attached to
	 * an IOMMU. For these devices, callers must pass a valid pointer via
	 * the @phys argument.
	 *
	 * Imported buffers were also already mapped at import time, so the
	 * existing mapping can be reused.
	 */
	if (phys) {
		*phys = obj->iova;
		return NULL;
	}

	/*
	 * If we don't have a mapping for this buffer yet, return an SG table
	 * so that host1x can do the mapping for us via the DMA API.
	 */
	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	if (obj->pages) {
		/*
		 * If the buffer object was allocated from the explicit IOMMU
		 * API code paths, construct an SG table from the pages.
		 */
		err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages,
						0, obj->gem.size, GFP_KERNEL);
		if (err < 0)
			goto free;
	} else if (obj->sgt) {
		/*
		 * If the buffer object already has an SG table but no pages
		 * were allocated for it, it means the buffer was imported and
		 * the SG table needs to be copied to avoid overwriting any
		 * other potential users of the original SG table.
		 */
		err = sg_alloc_table_from_sg(sgt, obj->sgt->sgl, obj->sgt->nents,
					     GFP_KERNEL);
		if (err < 0)
			goto free;
	} else {
		/*
		 * If the buffer object had no pages allocated and if it was
		 * not imported, it had to be allocated with the DMA API, so
		 * the DMA API helper can be used.
		 */
		err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova,
				      obj->gem.size);
		if (err < 0)
			goto free;
	}

	return sgt;

free:
	kfree(sgt);
	return ERR_PTR(err);
}
static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
{
	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}
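/*
 * Return a kernel virtual address for the buffer: DMA API allocations are
 * already mapped, imported buffers are vmap()ed through their dma-buf, and
 * page-backed buffers get a fresh write-combined vmap().
 */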
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct dma_buf_map map;
	int ret;

	if (obj->vaddr) {
		return obj->vaddr;
	} else if (obj->gem.import_attach) {
		ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
		return ret ? NULL : map.vaddr;
	} else {
		return vmap(obj->pages, obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));
	}
}
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(addr);

	if (obj->vaddr)
		return;
	else if (obj->gem.import_attach)
		dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
	else
		vunmap(addr);
}
static struct host1x_bo *tegra_bo_get(struct host1x_bo *bo)
{
	struct tegra_bo *obj = host1x_to_tegra_bo(bo);

	drm_gem_object_get(&obj->gem);

	return bo;
}
static const struct host1x_bo_ops tegra_bo_ops = {
	.get = tegra_bo_get,
	.put = tegra_bo_put,
	.pin = tegra_bo_pin,
	.unpin = tegra_bo_unpin,
	.mmap = tegra_bo_mmap,
	.munmap = tegra_bo_munmap,
};
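/*
 * Reserve a range of I/O virtual address space from the drm_mm allocator
 * and map the buffer's SG table into it through the IOMMU domain shared by
 * the Tegra DRM clients.
 */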
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	int err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	mutex_lock(&tegra->mm_lock);

	err = drm_mm_insert_node_generic(&tegra->mm,
					 bo->mm, bo->gem.size, PAGE_SIZE, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %d\n",
			err);
		goto unlock;
	}

	bo->iova = bo->mm->start;

	bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl,
				bo->sgt->nents, prot);
	if (!bo->size) {
		dev_err(tegra->drm->dev, "failed to map buffer\n");
		err = -ENOMEM;
		goto remove;
	}

	mutex_unlock(&tegra->mm_lock);

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
unlock:
	mutex_unlock(&tegra->mm_lock);
	kfree(bo->mm);
	return err;
}
static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	mutex_lock(&tegra->mm_lock);
	iommu_unmap(tegra->domain, bo->iova, bo->size);
	drm_mm_remove_node(bo->mm);
	mutex_unlock(&tegra->mm_lock);

	kfree(bo->mm);

	return 0;
}
static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
	.free = tegra_bo_free_object,
	.export = tegra_gem_prime_export,
	.vm_ops = &tegra_bo_vm_ops,
};
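/*
 * Allocate and initialize the GEM and host1x parts of a buffer object;
 * setting up the backing storage is left to the callers.
 */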
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
					      size_t size)
{
	struct tegra_bo *bo;
	int err;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	bo->gem.funcs = &tegra_gem_object_funcs;

	host1x_bo_init(&bo->base, &tegra_bo_ops);
	size = round_up(size, PAGE_SIZE);

	err = drm_gem_object_init(drm, &bo->gem, size);
	if (err < 0)
		goto free;

	err = drm_gem_create_mmap_offset(&bo->gem);
	if (err < 0)
		goto release;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
free:
	kfree(bo);
	return ERR_PTR(err);
}
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
	if (bo->pages) {
		dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			     DMA_FROM_DEVICE);
		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
		sg_free_table(bo->sgt);
		kfree(bo->sgt);
	} else if (bo->vaddr) {
		dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova);
	}
}
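/*
 * Allocate shmem-backed pages for the buffer, build an SG table from them
 * and map it for DMA. This backing store is used when an IOMMU domain is
 * available.
 */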
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	int err;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->gem.dev, bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto put_pages;
	}

	err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			 DMA_FROM_DEVICE);
	if (err == 0) {
		err = -EFAULT;
		goto free_sgt;
	}

	return 0;

free_sgt:
	sg_free_table(bo->sgt);
	kfree(bo->sgt);
put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return err;
}
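/*
 * Pick a backing store for the buffer: individual pages mapped through the
 * IOMMU if a domain is available, a contiguous write-combined DMA API
 * allocation otherwise.
 */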
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
	struct tegra_drm *tegra = drm->dev_private;
	int err;

	if (tegra->domain) {
		err = tegra_bo_get_pages(drm, bo);
		if (err < 0)
			return err;

		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0) {
			tegra_bo_free(drm, bo);
			return err;
		}
	} else {
		size_t size = bo->gem.size;

		bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova,
					 GFP_KERNEL | __GFP_NOWARN);
		if (!bo->vaddr) {
			dev_err(drm->dev,
				"failed to allocate buffer of size %zu\n",
				size);
			return -ENOMEM;
		}
	}

	return 0;
}
struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
				 unsigned long flags)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, size);
	if (IS_ERR(bo))
		return bo;

	err = tegra_bo_alloc(drm, bo);
	if (err < 0)
		goto release;

	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;

	if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	return bo;

release:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
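/*
 * Like tegra_bo_create(), but also installs a handle for userspace. The
 * handle holds its own reference, so the local reference taken at creation
 * time is dropped before returning.
 */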
struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
					     struct drm_device *drm,
					     size_t size, unsigned long flags,
					     u32 *handle)
{
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_create(drm, size, flags);
	if (IS_ERR(bo))
		return bo;

	err = drm_gem_handle_create(file, &bo->gem, handle);
	if (err) {
		tegra_bo_free_object(&bo->gem);
		return ERR_PTR(err);
	}

	drm_gem_object_put(&bo->gem);

	return bo;
}
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
					struct dma_buf *buf)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct dma_buf_attachment *attach;
	struct tegra_bo *bo;
	int err;

	bo = tegra_bo_alloc_object(drm, buf->size);
	if (IS_ERR(bo))
		return bo;

	attach = dma_buf_attach(buf, drm->dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto free;
	}

	get_dma_buf(buf);

	bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(bo->sgt)) {
		err = PTR_ERR(bo->sgt);
		goto detach;
	}

	if (tegra->domain) {
		err = tegra_bo_iommu_map(tegra, bo);
		if (err < 0)
			goto detach;
	}

	bo->gem.import_attach = attach;

	return bo;

detach:
	if (!IS_ERR_OR_NULL(bo->sgt))
		dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);

	dma_buf_detach(buf, attach);
	dma_buf_put(buf);
free:
	drm_gem_object_release(&bo->gem);
	kfree(bo);
	return ERR_PTR(err);
}
void tegra_bo_free_object(struct drm_gem_object *gem)
{
	struct tegra_drm *tegra = gem->dev->dev_private;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (tegra->domain)
		tegra_bo_iommu_unmap(tegra, bo);

	if (gem->import_attach) {
		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(gem, NULL);
	} else {
		tegra_bo_free(gem->dev, bo);
	}

	drm_gem_object_release(gem);
	kfree(bo);
}
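/*
 * Implements the DRM "dumb buffer" interface; the pitch is rounded up to
 * the alignment required by the display hardware.
 */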
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
			 struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_bo *bo;

	args->pitch = round_up(min_pitch, tegra->pitch_align);
	args->size = args->pitch * args->height;

	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}
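/*
 * Fault handler for mmap()ed buffers. Only page-backed buffers fault their
 * pages in lazily; DMA API allocations are mapped upfront in
 * __tegra_gem_mmap() and should never end up here.
 */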
static vm_fault_t tegra_bo_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *gem = vma->vm_private_data;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct page *page;
	pgoff_t offset;

	if (!bo->pages)
		return VM_FAULT_SIGBUS;

	offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
	page = bo->pages[offset];

	return vmf_insert_page(vma, vmf->address, page);
}
const struct vm_operations_struct tegra_bo_vm_ops = {
	.fault = tegra_bo_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
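/*
 * Set up a userspace mapping: contiguous DMA API allocations are remapped
 * in one go via dma_mmap_wc(), while page-backed buffers rely on lazy
 * faulting through tegra_bo_fault().
 */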
int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (!bo->pages) {
		unsigned long vm_pgoff = vma->vm_pgoff;
		int err;

		/*
		 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(),
		 * and set the vm_pgoff (used as a fake buffer offset by DRM)
		 * to 0 as we want to map the whole buffer.
		 */
		vma->vm_flags &= ~VM_PFNMAP;
		vma->vm_pgoff = 0;

		err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
				  gem->size);
		if (err < 0) {
			drm_gem_vm_close(vma);
			return err;
		}

		vma->vm_pgoff = vm_pgoff;
	} else {
		pgprot_t prot = vm_get_page_prot(vma->vm_flags);

		vma->vm_flags |= VM_MIXEDMAP;
		vma->vm_flags &= ~VM_PFNMAP;

		vma->vm_page_prot = pgprot_writecombine(prot);
	}

	return 0;
}
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem;
	int err;

	err = drm_gem_mmap(file, vma);
	if (err < 0)
		return err;

	gem = vma->vm_private_data;

	return __tegra_gem_mmap(gem, vma);
}
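/*
 * dma-buf exporter callback: map the buffer into an importing device's
 * address space by constructing an SG table from either the backing pages
 * or the DMA API allocation.
 */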
static struct sg_table *
tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
			    enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (bo->pages) {
		if (sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages,
					      0, gem->size, GFP_KERNEL) < 0)
			goto free;
	} else {
		if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova,
				    gem->size) < 0)
			goto free;
	}

	if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
		goto free;

	return sgt;

free:
	sg_free_table(sgt);
	kfree(sgt);
	return NULL;
}
static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
					  struct sg_table *sgt,
					  enum dma_data_direction dir)
{
	struct drm_gem_object *gem = attach->dmabuf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	if (bo->pages)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	sg_free_table(sgt);
	kfree(sgt);
}
static void tegra_gem_prime_release(struct dma_buf *buf)
{
	drm_gem_dmabuf_release(buf);
}
static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
					    enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				    DMA_FROM_DEVICE);

	return 0;
}
static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
					  enum dma_data_direction direction)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);
	struct drm_device *drm = gem->dev;

	if (bo->pages)
		dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
				       DMA_TO_DEVICE);

	return 0;
}
static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *gem = buf->priv;
	int err;

	err = drm_gem_mmap_obj(gem, gem->size, vma);
	if (err < 0)
		return err;

	return __tegra_gem_mmap(gem, vma);
}
static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map)
{
	struct drm_gem_object *gem = buf->priv;
	struct tegra_bo *bo = to_tegra_bo(gem);

	dma_buf_map_set_vaddr(map, bo->vaddr);

	return 0;
}
static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct dma_buf_map *map)
{
}
static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
	.map_dma_buf = tegra_gem_prime_map_dma_buf,
	.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
	.release = tegra_gem_prime_release,
	.begin_cpu_access = tegra_gem_prime_begin_cpu_access,
	.end_cpu_access = tegra_gem_prime_end_cpu_access,
	.mmap = tegra_gem_prime_mmap,
	.vmap = tegra_gem_prime_vmap,
	.vunmap = tegra_gem_prime_vunmap,
};
struct dma_buf *tegra_gem_prime_export(struct drm_gem_object *gem,
				       int flags)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = KBUILD_MODNAME;
	exp_info.owner = gem->dev->driver->fops->owner;
	exp_info.ops = &tegra_gem_prime_dmabuf_ops;
	exp_info.size = gem->size;
	exp_info.flags = flags;
	exp_info.priv = gem;

	return drm_gem_dmabuf_export(gem->dev, &exp_info);
}
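/*
 * If the dma-buf was exported by this driver for the same device, skip the
 * import and simply take another reference on the underlying GEM object.
 */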
struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
					      struct dma_buf *buf)
{
	struct tegra_bo *bo;

	if (buf->ops == &tegra_gem_prime_dmabuf_ops) {
		struct drm_gem_object *gem = buf->priv;

		if (gem->dev == drm) {
			drm_gem_object_get(gem);
			return gem;
		}
	}

	bo = tegra_bo_import(drm, buf);
	if (IS_ERR(bo))
		return ERR_CAST(bo);

	return &bo->gem;
}