/*
 * drm gem CMA (contiguous memory allocator) helper functions
 *
 * Copyright (C) 2012 Sascha Hauer, Pengutronix
 *
 * Based on Samsung Exynos code
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_vma_manager.h>
/**
 * DOC: cma helpers
 *
 * The Contiguous Memory Allocator reserves a pool of memory at early boot
 * that is used to service requests for large blocks of contiguous memory.
 *
 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
 * objects that are physically contiguous in memory. This is useful for
 * display drivers that are unable to map scattered buffers via an IOMMU.
 */
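/*
 * Illustrative sketch, not part of the original file: a minimal &drm_driver
 * wired up to the CMA helpers implemented below. The "example_" names are
 * hypothetical; the callback assignments are the helpers' intended hook-up.
 */
DEFINE_DRM_GEM_CMA_FOPS(example_cma_fops);

static struct drm_driver example_cma_driver = {
	.driver_features	   = DRIVER_GEM | DRIVER_MODESET,
	/* file_operations, including the CMA ->mmap() handler */
	.fops			   = &example_cma_fops,
	/* dumb buffers backed by physically contiguous memory */
	.dumb_create		   = drm_gem_cma_dumb_create,
	/* frees both allocated and imported CMA objects */
	.gem_free_object_unlocked  = drm_gem_cma_free_object,
	.gem_vm_ops		   = &drm_gem_cma_vm_ops,
	/* PRIME export/import of contiguous buffers */
	.gem_prime_get_sg_table	   = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap		   = drm_gem_cma_prime_vmap,
	.gem_prime_vunmap	   = drm_gem_cma_prime_vunmap,
	.gem_prime_mmap		   = drm_gem_cma_prime_mmap,
};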
/**
 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates and initializes a GEM CMA object of the given size,
 * but doesn't allocate any memory to back the object.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
__drm_gem_cma_create(struct drm_device *drm, size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	if (drm->driver->gem_create_object)
		gem_obj = drm->driver->gem_create_object(drm, size);
	else
		gem_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
	if (!gem_obj)
		return ERR_PTR(-ENOMEM);
	cma_obj = container_of(gem_obj, struct drm_gem_cma_object, base);

	ret = drm_gem_object_init(drm, gem_obj, size);
	if (ret)
		goto error;

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret) {
		drm_gem_object_release(gem_obj);
		goto error;
	}

	return cma_obj;

error:
	kfree(cma_obj);
	return ERR_PTR(ret);
}
/**
 * drm_gem_cma_create - allocate an object with the given size
 * @drm: DRM device
 * @size: size of the object to allocate
 *
 * This function creates a CMA GEM object and allocates a contiguous chunk of
 * memory as backing store. The backing memory has the writecombine attribute
 * set.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
					      size_t size)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	cma_obj = __drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	cma_obj->vaddr = dma_alloc_wc(drm->dev, size, &cma_obj->paddr,
				      GFP_KERNEL | __GFP_NOWARN);
	if (!cma_obj->vaddr) {
		dev_dbg(drm->dev, "failed to allocate buffer with size %zu\n",
			size);
		ret = -ENOMEM;
		goto error;
	}

	return cma_obj;

error:
	drm_gem_object_put_unlocked(&cma_obj->base);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
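/*
 * Illustrative sketch, not part of the original file: allocating a
 * contiguous scanout buffer with drm_gem_cma_create(). The 1080p XRGB8888
 * size and the function name are hypothetical.
 */
static struct drm_gem_cma_object *example_alloc_scanout(struct drm_device *drm)
{
	struct drm_gem_cma_object *cma_obj;

	cma_obj = drm_gem_cma_create(drm, 1920 * 1080 * 4);
	if (IS_ERR(cma_obj))
		return cma_obj;	/* failure is an ERR_PTR(), never NULL */

	/*
	 * cma_obj->paddr is the DMA address to program into the scanout
	 * hardware; cma_obj->vaddr is a writecombine kernel mapping.
	 */
	dev_dbg(drm->dev, "scanout buffer at %pad\n", &cma_obj->paddr);

	return cma_obj;
}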
/**
 * drm_gem_cma_create_with_handle - allocate an object with the given size and
 *     return a GEM handle to it
 * @file_priv: DRM file-private structure to register the handle for
 * @drm: DRM device
 * @size: size of the object to allocate
 * @handle: return location for the GEM handle
 *
 * This function creates a CMA GEM object, allocating a physically contiguous
 * chunk of memory as backing store. The GEM object is then added to the list
 * of objects associated with the given file and a handle to it is returned.
 *
 * Returns:
 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
static struct drm_gem_cma_object *
drm_gem_cma_create_with_handle(struct drm_file *file_priv,
			       struct drm_device *drm, size_t size,
			       uint32_t *handle)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	cma_obj = drm_gem_cma_create(drm, size);
	if (IS_ERR(cma_obj))
		return cma_obj;

	gem_obj = &cma_obj->base;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, gem_obj, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(gem_obj);
	if (ret)
		return ERR_PTR(ret);

	return cma_obj;
}
/**
 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
 * @gem_obj: GEM object to free
 *
 * This function frees the backing memory of the CMA GEM object, cleans up the
 * GEM object state and frees the memory used to store the object itself.
 * If the buffer is imported and the virtual address is set, it is released.
 * Drivers using the CMA helpers should set this as their
 * &drm_driver.gem_free_object_unlocked callback.
 */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_gem_cma_object *cma_obj;

	cma_obj = to_drm_gem_cma_obj(gem_obj);

	if (gem_obj->import_attach) {
		if (cma_obj->vaddr)
			dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
		drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
	} else if (cma_obj->vaddr) {
		dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
			    cma_obj->vaddr, cma_obj->paddr);
	}

	drm_gem_object_release(gem_obj);

	kfree(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
/**
 * drm_gem_cma_dumb_create_internal - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This aligns the pitch and size arguments to the minimum required. This is
 * an internal helper that can be wrapped by a driver to account for hardware
 * with more specific alignment requirements. It should not be used directly
 * as the &drm_driver.dumb_create callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
				     struct drm_device *drm,
				     struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_cma_object *cma_obj;

	if (args->pitch < min_pitch)
		args->pitch = min_pitch;

	if (args->size < args->pitch * args->height)
		args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
/**
 * drm_gem_cma_dumb_create - create a dumb buffer object
 * @file_priv: DRM file-private structure to create the dumb buffer for
 * @drm: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace and pass the IOCTL data along to the
 * drm_gem_cma_dumb_create_internal() function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_dumb_create(struct drm_file *file_priv,
			    struct drm_device *drm,
			    struct drm_mode_create_dumb *args)
{
	struct drm_gem_cma_object *cma_obj;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
						 &args->handle);
	return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
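/*
 * Illustrative sketch, not part of the original file: a driver whose scanout
 * engine requires the pitch to be aligned (here to a hypothetical 128 bytes)
 * adjusts args->pitch and then defers to the internal helper, as described
 * in the comment above. The function name and alignment are hypothetical.
 */
static int example_dumb_create(struct drm_file *file_priv,
			       struct drm_device *drm,
			       struct drm_mode_create_dumb *args)
{
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/* Round the minimum pitch up to the hardware's alignment. */
	args->pitch = ALIGN(min_pitch, 128);

	return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
}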
const struct vm_operations_struct drm_gem_cma_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops);
static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
				struct vm_area_struct *vma)
{
	int ret;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_pgoff = 0;

	ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr,
			  cma_obj->paddr, vma->vm_end - vma->vm_start);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
/**
 * drm_gem_cma_mmap - memory-map a CMA GEM object
 * @filp: file object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for CMA objects: In addition to the usual GEM VMA setup it
 * immediately faults in the entire object instead of using on-demand
 * faulting. Drivers that use the CMA helpers should use this function as
 * their ->mmap() handler in the DRM device file's file_operations
 * structure.
 *
 * Instead of directly referencing this function, drivers should use the
 * DEFINE_DRM_GEM_CMA_FOPS() macro.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *gem_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	gem_obj = vma->vm_private_data;
	cma_obj = to_drm_gem_cma_obj(gem_obj);

	return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
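/*
 * Illustrative sketch, not part of the original file: roughly the kind of
 * file_operations that DEFINE_DRM_GEM_CMA_FOPS(example_fops) declares (see
 * drm_gem_cma_helper.h for the authoritative definition; on noMMU kernels
 * the macro additionally wires up ->get_unmapped_area()).
 */
static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	/* the augmented CMA mmap operation documented above */
	.mmap		= drm_gem_cma_mmap,
};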
#ifndef CONFIG_MMU
/**
 * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
 * @filp: file object
 * @addr: memory address
 * @len: buffer size
 * @pgoff: page offset
 * @flags: memory flags
 *
 * This function is used in noMMU platforms to propose an address mapping
 * for a given buffer.
 * It's intended to be used as a direct handler for the struct
 * &file_operations.get_unmapped_area operation.
 *
 * Returns:
 * mapping address on success or a negative error code on failure.
 */
unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
					    unsigned long addr,
					    unsigned long len,
					    unsigned long pgoff,
					    unsigned long flags)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj = NULL;
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_offset_node *node;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  pgoff,
						  len >> PAGE_SHIFT);
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}

	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put_unlocked(obj);
		return -EACCES;
	}

	cma_obj = to_drm_gem_cma_obj(obj);

	drm_gem_object_put_unlocked(obj);

	return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
#endif
/**
 * drm_gem_cma_print_info() - Print &drm_gem_cma_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This function can be used as the &drm_driver.gem_print_info callback.
 * It prints paddr and vaddr for use in e.g. debugfs output.
 */
void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent,
			    const struct drm_gem_object *obj)
{
	const struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

	drm_printf_indent(p, indent, "paddr=%pad\n", &cma_obj->paddr);
	drm_printf_indent(p, indent, "vaddr=%p\n", cma_obj->vaddr);
}
EXPORT_SYMBOL(drm_gem_cma_print_info);
/**
 * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
 *     pages for a CMA GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers using the CMA helpers should
 * set this as their &drm_driver.gem_prime_get_sg_table callback.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
 * negative error code on failure.
 */
struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
			      cma_obj->paddr, obj->size);
	if (ret < 0)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
/**
 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
 *     driver's scatter/gather table of pinned pages
 * @dev: device to import into
 * @attach: DMA-BUF attachment
 * @sgt: scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Imported buffers must be physically contiguous in memory
 * (i.e. the entries of the scatter/gather table must describe a single
 * contiguous block). Drivers that use the CMA helpers should set this as
 * their &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;

	if (sgt->nents != 1) {
		/* check if the entries in the sg_table are contiguous */
		dma_addr_t next_addr = sg_dma_address(sgt->sgl);
		struct scatterlist *s;
		unsigned int i;

		for_each_sg(sgt->sgl, s, sgt->nents, i) {
			/*
			 * sg_dma_address(s) is only valid for entries
			 * that have sg_dma_len(s) != 0
			 */
			if (!sg_dma_len(s))
				continue;

			if (sg_dma_address(s) != next_addr)
				return ERR_PTR(-EINVAL);

			next_addr = sg_dma_address(s) + sg_dma_len(s);
		}
	}

	/* Create a CMA GEM buffer. */
	cma_obj = __drm_gem_cma_create(dev, attach->dmabuf->size);
	if (IS_ERR(cma_obj))
		return ERR_CAST(cma_obj);

	cma_obj->paddr = sg_dma_address(sgt->sgl);
	cma_obj->sgt = sgt;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &cma_obj->paddr,
			attach->dmabuf->size);

	return &cma_obj->base;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
/**
 * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function maps a buffer imported via DRM PRIME into a userspace
 * process's address space. Drivers that use the CMA helpers should set this
 * as their &drm_driver.gem_prime_mmap callback.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
			   struct vm_area_struct *vma)
{
	struct drm_gem_cma_object *cma_obj;
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret < 0)
		return ret;

	cma_obj = to_drm_gem_cma_obj(obj);
	return drm_gem_cma_mmap_obj(cma_obj, vma);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
/**
 * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
 *     address space
 * @obj: GEM object
 *
 * This function maps a buffer exported via DRM PRIME into the kernel's
 * virtual address space. Since the CMA buffers are already mapped into the
 * kernel virtual address space this simply returns the cached virtual
 * address. Drivers using the CMA helpers should set this as their DRM
 * driver's &drm_driver.gem_prime_vmap callback.
 *
 * Returns:
 * The kernel virtual address of the CMA GEM object's backing store.
 */
void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
{
	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);

	return cma_obj->vaddr;
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
/**
 * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
 *     address space
 * @obj: GEM object
 * @vaddr: kernel virtual address where the CMA GEM object was mapped
 *
 * This function removes a buffer exported via DRM PRIME from the kernel's
 * virtual address space. This is a no-op because CMA buffers cannot be
 * unmapped from kernel space. Drivers using the CMA helpers should set this
 * as their &drm_driver.gem_prime_vunmap callback.
 */
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	/* Nothing to do */
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
static const struct drm_gem_object_funcs drm_cma_gem_default_funcs = {
	.free = drm_gem_cma_free_object,
	.print_info = drm_gem_cma_print_info,
	.get_sg_table = drm_gem_cma_prime_get_sg_table,
	.vmap = drm_gem_cma_prime_vmap,
	.vm_ops = &drm_gem_cma_vm_ops,
};
/**
 * drm_cma_gem_create_object_default_funcs - Create a CMA GEM object with a
 *     default function table
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This sets the GEM object functions to the default CMA helper functions.
 * This function can be used as the &drm_driver.gem_create_object callback.
 *
 * Returns:
 * A pointer to an allocated GEM object or an error pointer on failure.
 */
struct drm_gem_object *
drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size)
{
	struct drm_gem_cma_object *cma_obj;

	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
	if (!cma_obj)
		return NULL;

	cma_obj->base.funcs = &drm_cma_gem_default_funcs;

	return &cma_obj->base;
}
EXPORT_SYMBOL(drm_cma_gem_create_object_default_funcs);
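/*
 * Illustrative sketch, not part of the original file: a driver opting into
 * the default CMA object functions points &drm_driver.gem_create_object at
 * the helper above; __drm_gem_cma_create() then calls it instead of doing a
 * plain kzalloc(). The driver name is hypothetical.
 */
static struct drm_driver example_default_funcs_driver = {
	.driver_features   = DRIVER_GEM | DRIVER_MODESET,
	.gem_create_object = drm_cma_gem_create_object_default_funcs,
	.dumb_create	   = drm_gem_cma_dumb_create,
};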
/**
 * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
 *     scatter/gather table and get the virtual address of the buffer
 * @dev: DRM device
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table using
 * drm_gem_cma_prime_import_sg_table() and uses dma_buf_vmap() to get the kernel
 * virtual address. This ensures that a CMA GEM object always has its virtual
 * address set. This address is released when the object is freed.
 *
 * This function can be used as the &drm_driver.gem_prime_import_sg_table
 * callback. The DRM_GEM_CMA_VMAP_DRIVER_OPS() macro provides a shortcut to set
 * the necessary DRM driver operations.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
				       struct dma_buf_attachment *attach,
				       struct sg_table *sgt)
{
	struct drm_gem_cma_object *cma_obj;
	struct drm_gem_object *obj;
	void *vaddr;

	vaddr = dma_buf_vmap(attach->dmabuf);
	if (!vaddr) {
		DRM_ERROR("Failed to vmap PRIME buffer\n");
		return ERR_PTR(-ENOMEM);
	}

	obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj)) {
		dma_buf_vunmap(attach->dmabuf, vaddr);
		return obj;
	}

	cma_obj = to_drm_gem_cma_obj(obj);
	cma_obj->vaddr = vaddr;

	return obj;
}
EXPORT_SYMBOL(drm_gem_cma_prime_import_sg_table_vmap);
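/*
 * Illustrative sketch, not part of the original file: a driver that needs
 * imported buffers to always carry a kernel virtual mapping (e.g. for CPU
 * rendering) pulls in this import path through the shortcut macro mentioned
 * above, which sets .gem_prime_import_sg_table to the _vmap variant along
 * with the other CMA driver operations (see drm_gem_cma_helper.h). The
 * driver name is hypothetical.
 */
static struct drm_driver example_vmap_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET,
	DRM_GEM_CMA_VMAP_DRIVER_OPS,
};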