// SPDX-License-Identifier: GPL-2.0-or-later
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 */
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/pfn_t.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
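/*
 * Allocate the DMA buffer that backs a GEM object and build the page
 * array used by the fault handler and the PRIME helpers. The EXYNOS_BO_*
 * flags requested by userspace are translated into DMA_ATTR_* attributes
 * before the buffer is allocated with dma_alloc_attrs().
 */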
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;
	unsigned long attr;
	unsigned int nr_pages;
	struct sg_table sgt;
	int ret = -ENOMEM;

	if (exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
		return 0;
	}

	exynos_gem->dma_attrs = 0;
	/*
	 * if EXYNOS_BO_CONTIG, allocate a fully physically contiguous
	 * memory region; otherwise the pages may be scattered and are
	 * made contiguous in device address space by the IOMMU.
	 */
	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
		exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
	/*
	 * if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, use a writecombine
	 * mapping; otherwise use a cachable mapping.
	 */
	if (exynos_gem->flags & EXYNOS_BO_WC ||
	    !(exynos_gem->flags & EXYNOS_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	exynos_gem->dma_attrs |= attr;
	exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
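	/*
	 * DMA_ATTR_NO_KERNEL_MAPPING makes dma_alloc_attrs() below return
	 * an opaque cookie instead of a kernel virtual address; the driver
	 * only needs the DMA address and the page array, and userspace
	 * mappings are set up on demand via mmap.
	 */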
	nr_pages = exynos_gem->size >> PAGE_SHIFT;

	exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
			GFP_KERNEL | __GFP_ZERO);
	if (!exynos_gem->pages) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
		return -ENOMEM;
	}
	exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
					     &exynos_gem->dma_addr, GFP_KERNEL,
					     exynos_gem->dma_attrs);
	if (!exynos_gem->cookie) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
		goto err_free;
	}

	ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
				    exynos_gem->dma_addr, exynos_gem->size,
				    exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
		goto err_dma_free;
	}

	if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
					     nr_pages)) {
		DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
		ret = -EINVAL;
		goto err_sgt_free;
	}

	sg_free_table(&sgt);

	DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
			  (unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	return 0;

err_sgt_free:
	sg_free_table(&sgt);
err_dma_free:
	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
	kvfree(exynos_gem->pages);

	return ret;
}
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
{
	struct drm_device *dev = exynos_gem->base.dev;

	if (!exynos_gem->dma_addr) {
		DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
		return;
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
			  (unsigned long)exynos_gem->dma_addr, exynos_gem->size);

	dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
		       (dma_addr_t)exynos_gem->dma_addr,
		       exynos_gem->dma_attrs);

	kvfree(exynos_gem->pages);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the obj is registered;
	 * the handle is the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(obj);

	return 0;
}
void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
	struct drm_gem_object *obj = &exynos_gem->base;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
			  obj->handle_count);

	/*
	 * do not release the memory region from the exporter.
	 *
	 * the region will be released by the exporter
	 * once the dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, exynos_gem->sgt);
	else
		exynos_drm_free_buf(exynos_gem);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem);
}
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
						  unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem = kzalloc(sizeof(*exynos_gem), GFP_KERNEL);
	if (!exynos_gem)
		return ERR_PTR(-ENOMEM);

	exynos_gem->size = size;
	obj = &exynos_gem->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret < 0) {
		drm_gem_object_release(obj);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);

	return exynos_gem;
}
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
					     unsigned int flags,
					     unsigned long size)
{
	struct exynos_drm_gem *exynos_gem;
	int ret;

	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_DEV_ERROR(dev->dev,
			      "invalid GEM buffer flags: %u\n", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
		return ERR_PTR(-EINVAL);
	}

	size = roundup(size, PAGE_SIZE);

	exynos_gem = exynos_drm_gem_init(dev, size);
	if (IS_ERR(exynos_gem))
		return exynos_gem;

	if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
		/*
		 * when no IOMMU is available, all allocated buffers are
		 * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
		 */
		flags &= ~EXYNOS_BO_NONCONTIG;
		DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
	}

	/* set memory type and cache attribute from user side. */
	exynos_gem->flags = flags;

	ret = exynos_drm_alloc_buf(exynos_gem);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem->base);
		kfree(exynos_gem);
		return ERR_PTR(ret);
	}

	return exynos_gem;
}
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem *exynos_gem;
	int ret;

	exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem))
		return PTR_ERR(exynos_gem);

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}
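/*
 * A minimal userspace sketch of the GEM create ioctl above, assuming the
 * UAPI from include/uapi/drm/exynos_drm.h (struct drm_exynos_gem_create
 * and DRM_IOCTL_EXYNOS_GEM_CREATE) and an already-open DRM device fd:
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = 4096,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
 *	};
 *	int err = ioctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req);
 *
 * On success, req.handle names the new GEM object within this DRM file.
 */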
int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_exynos_gem_map *args = data;

	return drm_gem_dumb_map_offset(file_priv, dev, args->handle,
				       &args->offset);
}
struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
					  unsigned int gem_handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filp, gem_handle);
	if (!obj)
		return NULL;
	return to_exynos_gem(obj);
}
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
				      struct vm_area_struct *vma)
{
	struct drm_device *drm_dev = exynos_gem->base.dev;
	unsigned long vm_size;
	int ret;

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;
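	/*
	 * vm_pgoff carried the fake mmap offset that drm_gem_mmap() used to
	 * look up this object; it is cleared above so that dma_mmap_attrs()
	 * below maps the buffer from its beginning.
	 */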
	vm_size = vma->vm_end - vma->vm_start;

	/* check if user-requested size is valid. */
	if (vm_size > exynos_gem->size)
		return -EINVAL;

	ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
			     exynos_gem->dma_addr, exynos_gem->size,
			     exynos_gem->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	return 0;
}
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem *exynos_gem;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
		return -EINVAL;
	}

	exynos_gem = to_exynos_gem(obj);

	args->flags = exynos_gem->flags;
	args->size = exynos_gem->size;

	drm_gem_object_put_unlocked(obj);

	return 0;
}
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	exynos_drm_gem_destroy(to_exynos_gem(obj));
}
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem *exynos_gem;
	unsigned int flags;
	int ret;

	/*
	 * allocate memory to be used for the framebuffer.
	 * - this callback is invoked by a user application
	 *   via the DRM_IOCTL_MODE_CREATE_DUMB command.
	 */

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;
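	/*
	 * For example, a 1920x1080 buffer at 32 bpp yields
	 * pitch = 1920 * 4 = 7680 bytes and size = 7680 * 1080 = 8294400
	 * bytes; exynos_drm_gem_create() then rounds the size up to a
	 * whole number of pages.
	 */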
	if (is_drm_iommu_supported(dev))
		flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
	else
		flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;

	exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
	if (IS_ERR(exynos_gem)) {
		dev_warn(dev->dev, "FB allocation failed.\n");
		return PTR_ERR(exynos_gem);
	}

	ret = exynos_drm_gem_handle_create(&exynos_gem->base, file_priv,
					   &args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem);
		return ret;
	}

	return 0;
}
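/*
 * Fault handler for userspace mappings: translate the faulting address
 * into a page offset inside the buffer and insert the pfn of the
 * corresponding entry of exynos_gem->pages into the mapping.
 */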
vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	unsigned long pfn;
	pgoff_t page_offset;

	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		return VM_FAULT_SIGBUS;
	}

	pfn = page_to_pfn(exynos_gem->pages[page_offset]);
	return vmf_insert_mixed(vma, vmf->address,
				__pfn_to_pfn_t(pfn, PFN_DEV));
}
static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int ret;

	DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
			  exynos_gem->flags);

	/* non-cachable as default. */
	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (exynos_gem->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));

	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
	if (ret)
		goto err_close_vm;

	return ret;

err_close_vm:
	drm_gem_vm_close(vma);

	return ret;
}
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	return exynos_drm_gem_mmap_obj(obj, vma);
}
/* low-level interface prime helpers */
struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
						   struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, to_dma_dev(dev));
}
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	int npages;

	npages = exynos_gem->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(exynos_gem->pages, npages);
}
struct drm_gem_object *
exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
				     struct dma_buf_attachment *attach,
				     struct sg_table *sgt)
{
	struct exynos_drm_gem *exynos_gem;
	int npages;
	int ret;

	exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(exynos_gem)) {
		ret = PTR_ERR(exynos_gem);
		return ERR_PTR(ret);
	}

	exynos_gem->dma_addr = sg_dma_address(sgt->sgl);

	npages = exynos_gem->size >> PAGE_SHIFT;
	exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *),
					   GFP_KERNEL);
	if (!exynos_gem->pages) {
		ret = -ENOMEM;
		goto err;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
					       npages);
	if (ret < 0)
		goto err_free_large;

	exynos_gem->sgt = sgt;

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * this case could be CONTIG or NONCONTIG type but for now
		 * we assume NONCONTIG.
		 * TODO: find a way for the exporter to notify the importer
		 * of the type of its buffer.
		 */
		exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
	}

	return &exynos_gem->base;

err_free_large:
	kvfree(exynos_gem->pages);
err:
	drm_gem_object_release(&exynos_gem->base);
	kfree(exynos_gem);
	return ERR_PTR(ret);
}
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	return NULL;
}

void exynos_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}
int exynos_drm_gem_prime_mmap(struct drm_gem_object *obj,
			      struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	return exynos_drm_gem_mmap_obj(obj, vma);
}