// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>

/**
 * DOC: overview
 *
 * This library provides helpers for GEM objects backed by shmem buffers
 * allocated using anonymous pageable memory.
 */
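
/*
 * Example (illustrative sketch, not taken from a real driver): a driver that
 * relies on these helpers typically pulls the shmem callbacks in through the
 * DRM_GEM_SHMEM_DRIVER_OPS macro from <drm/drm_gem_shmem_helper.h>. The
 * "mydrv" name below is hypothetical:
 *
 *	static struct drm_driver mydrv_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 *
 * Objects created through those callbacks have their &drm_gem_object_funcs
 * set to drm_gem_shmem_funcs below, so free, pin, vmap and mmap are all
 * routed back into this file.
 */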

static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
	.free = drm_gem_shmem_free_object,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	int ret = 0;

	size = PAGE_ALIGN(size);

	if (dev->driver->gem_create_object)
		obj = dev->driver->gem_create_object(dev, size);
	else
		obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	if (!obj->funcs)
		obj->funcs = &drm_gem_shmem_funcs;

	if (private)
		drm_gem_private_object_init(dev, obj, size);
	else
		ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto err_free;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto err_release;

	shmem = to_drm_gem_shmem_obj(obj);
	mutex_init(&shmem->pages_lock);
	mutex_init(&shmem->vmap_lock);
	INIT_LIST_HEAD(&shmem->madv_list);

	if (!private) {
		/*
		 * Our buffers are kept pinned, so allocating them
		 * from the MOVABLE zone is a really bad idea, and
		 * conflicts with CMA. See comments above new_inode()
		 * why this is required _and_ expected if you're
		 * going to pin these pages.
		 */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
				     __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	}

	return shmem;

err_release:
	drm_gem_object_release(obj);
err_free:
	kfree(obj);

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_create - Allocate an object with the given size
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * This function creates a shmem GEM object.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
 * error code on failure.
 */
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
	return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
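
/*
 * Example (sketch): allocating a buffer for internal driver use and checking
 * the ERR_PTR()-style return value:
 *
 *	struct drm_gem_shmem_object *shmem;
 *
 *	shmem = drm_gem_shmem_create(dev, size);
 *	if (IS_ERR(shmem))
 *		return PTR_ERR(shmem);
 *
 * The reference obtained here is dropped with drm_gem_object_put() on
 * &shmem->base once the driver is done with the object.
 */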

/**
 * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
 * @obj: GEM object to free
 *
 * This function cleans up the GEM object state and frees the memory used to
 * store the object itself. It should be used to implement
 * &drm_gem_object_funcs.free.
 */
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->vmap_use_count);

	if (obj->import_attach) {
		drm_prime_gem_destroy(obj, shmem->sgt);
	} else {
		if (shmem->sgt) {
			dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
				     shmem->sgt->nents, DMA_BIDIRECTIONAL);
			sg_free_table(shmem->sgt);
			kfree(shmem->sgt);
		}
		if (shmem->pages)
			drm_gem_shmem_put_pages(shmem);
	}

	WARN_ON(shmem->pages_use_count);

	drm_gem_object_release(obj);
	mutex_destroy(&shmem->pages_lock);
	mutex_destroy(&shmem->vmap_lock);
	kfree(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);

static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	struct page **pages;

	if (shmem->pages_use_count++ > 0)
		return 0;

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
		shmem->pages_use_count = 0;
		return PTR_ERR(pages);
	}

	shmem->pages = pages;

	return 0;
}

/**
 * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function makes sure that backing pages exist for the shmem GEM object
 * and increases the use count.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = mutex_lock_interruptible(&shmem->pages_lock);
	if (ret)
		return ret;
	ret = drm_gem_shmem_get_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_shmem_get_pages);

static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->pages_use_count))
		return;

	if (--shmem->pages_use_count > 0)
		return;

	drm_gem_put_pages(obj, shmem->pages,
			  shmem->pages_mark_dirty_on_put,
			  shmem->pages_mark_accessed_on_put);
	shmem->pages = NULL;
}

/**
 * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
 * @shmem: shmem GEM object
 *
 * This function decreases the use count and puts the backing pages when the use count drops to zero.
 */
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
{
	mutex_lock(&shmem->pages_lock);
	drm_gem_shmem_put_pages_locked(shmem);
	mutex_unlock(&shmem->pages_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
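
/*
 * Example (sketch): get/put calls are reference counted, so nested users just
 * pair them; only the final put actually releases the pages:
 *
 *	ret = drm_gem_shmem_get_pages(shmem);
 *	if (ret)
 *		return ret;
 *	... access shmem->pages[] here ...
 *	drm_gem_shmem_put_pages(shmem);
 */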

/**
 * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure the backing pages are pinned in memory while the
 * buffer is exported. It should only be used to implement
 * &drm_gem_object_funcs.pin.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);

/**
 * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function removes the requirement that the backing pages are pinned in
 * memory. It should only be used to implement &drm_gem_object_funcs.unpin.
 */
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);

static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	int ret = 0;

	if (shmem->vmap_use_count++ > 0)
		return shmem->vaddr;

	if (obj->import_attach) {
		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
	} else {
		pgprot_t prot = PAGE_KERNEL;

		ret = drm_gem_shmem_get_pages(shmem);
		if (ret)
			goto err_zero_use;

		if (!shmem->map_cached)
			prot = pgprot_writecombine(prot);
		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
				    VM_MAP, prot);
	}

	if (!shmem->vaddr) {
		DRM_DEBUG_KMS("Failed to vmap pages\n");
		ret = -ENOMEM;
		goto err_put_pages;
	}

	return shmem->vaddr;

err_put_pages:
	if (!obj->import_attach)
		drm_gem_shmem_put_pages(shmem);
err_zero_use:
	shmem->vmap_use_count = 0;

	return ERR_PTR(ret);
}

/**
 * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
 * @obj: GEM object
 *
 * This function makes sure that a contiguous kernel virtual address mapping
 * exists for the buffer backing the shmem GEM object.
 *
 * This function can be used to implement &drm_gem_object_funcs.vmap. But it can
 * also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 *
 * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
 *
 * Returns:
 * A kernel virtual address on success or an ERR_PTR()-encoded negative error
 * code on failure.
 */
void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	void *vaddr;
	int ret;

	ret = mutex_lock_interruptible(&shmem->vmap_lock);
	if (ret)
		return ERR_PTR(ret);
	vaddr = drm_gem_shmem_vmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);

	return vaddr;
}
EXPORT_SYMBOL(drm_gem_shmem_vmap);

static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;

	if (WARN_ON_ONCE(!shmem->vmap_use_count))
		return;

	if (--shmem->vmap_use_count > 0)
		return;

	if (obj->import_attach)
		dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
	else
		vunmap(shmem->vaddr);

	shmem->vaddr = NULL;
	drm_gem_shmem_put_pages(shmem);
}

/**
 * drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object
 * @obj: GEM object
 * @vaddr: Virtual address of the mapping to remove
 *
 * This function cleans up a kernel virtual address mapping acquired by
 * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
 * zero.
 *
 * This function can be used to implement &drm_gem_object_funcs.vunmap. But it can
 * also be called by drivers directly, in which case it will hide the
 * differences between dma-buf imported and natively allocated objects.
 */
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->vmap_lock);
	drm_gem_shmem_vunmap_locked(shmem);
	mutex_unlock(&shmem->vmap_lock);
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
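
/*
 * Example (sketch): CPU access to a buffer through a temporary kernel
 * mapping. drm_gem_shmem_vmap() returns either a kernel virtual address or
 * an ERR_PTR() value, and every successful call must be balanced by
 * drm_gem_shmem_vunmap(). "data" and "len" stand in for whatever the caller
 * wants to copy:
 *
 *	void *vaddr = drm_gem_shmem_vmap(obj);
 *
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, len);
 *	drm_gem_shmem_vunmap(obj, vaddr);
 */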

struct drm_gem_shmem_object *
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
				 struct drm_device *dev, size_t size,
				 uint32_t *handle)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return shmem;

	/*
	 * Allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that userspace can see.
	 */
	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&shmem->base);
	if (ret)
		return ERR_PTR(ret);

	return shmem;
}
EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);

/* Update madvise status, returns true if not purged, else
 * false or -errno.
 */
int drm_gem_shmem_madvise(struct drm_gem_object *obj, int madv)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	mutex_lock(&shmem->pages_lock);

	if (shmem->madv >= 0)
		shmem->madv = madv;

	madv = shmem->madv;

	mutex_unlock(&shmem->pages_lock);

	return (madv >= 0);
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);

void drm_gem_shmem_purge_locked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(!drm_gem_shmem_is_purgeable(shmem));

	dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
		     shmem->sgt->nents, DMA_BIDIRECTIONAL);
	sg_free_table(shmem->sgt);
	kfree(shmem->sgt);
	shmem->sgt = NULL;

	drm_gem_shmem_put_pages_locked(shmem);

	shmem->madv = -1;

	drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
	drm_gem_free_mmap_offset(obj);

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);

	invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
				 0, (loff_t)-1);
}
EXPORT_SYMBOL(drm_gem_shmem_purge_locked);

bool drm_gem_shmem_purge(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	if (!mutex_trylock(&shmem->pages_lock))
		return false;
	drm_gem_shmem_purge_locked(obj);
	mutex_unlock(&shmem->pages_lock);

	return true;
}
EXPORT_SYMBOL(drm_gem_shmem_purge);
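
/*
 * Example (sketch): a driver shrinker can walk its list of objects that
 * userspace marked as DONTNEED via drm_gem_shmem_madvise() and purge them.
 * The "shrinker_list" and "freed" names below are hypothetical; only the
 * helper calls are real:
 *
 *	list_for_each_entry(shmem, &shrinker_list, madv_list) {
 *		if (drm_gem_shmem_is_purgeable(shmem) &&
 *		    drm_gem_shmem_purge(&shmem->base))
 *			freed += shmem->base.size >> PAGE_SHIFT;
 *	}
 */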

/**
 * drm_gem_shmem_create_object_cached - Create a shmem buffer object with
 *                                      cached mappings
 * @dev: DRM device
 * @size: Size of the object to allocate
 *
 * By default, shmem buffer objects use writecombine mappings. This
 * function implements struct drm_driver.gem_create_object for shmem
 * buffer objects with cached mappings.
 *
 * Returns:
 * A struct drm_gem_shmem_object * on success or NULL on failure.
 */
struct drm_gem_object *
drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;
	shmem->map_cached = true;

	return &shmem->base;
}
EXPORT_SYMBOL(drm_gem_shmem_create_object_cached);
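
/*
 * Example (sketch): a driver that wants cached CPU mappings for all of its
 * buffers only needs to install this function as its object constructor
 * ("mydrv" is hypothetical):
 *
 *	static struct drm_driver mydrv_driver = {
 *		.gem_create_object = drm_gem_shmem_create_object_cached,
 *		DRM_GEM_SHMEM_DRIVER_OPS,
 *	};
 *
 * Both drm_gem_shmem_vmap() and drm_gem_shmem_mmap() then honour
 * &drm_gem_shmem_object.map_cached and skip the writecombine conversion.
 */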

/**
 * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
 * @file: DRM file structure to create the dumb buffer for
 * @dev: DRM device
 * @args: IOCTL data
 *
 * This function computes the pitch of the dumb buffer and rounds it up to an
 * integer number of bytes per pixel. Drivers for hardware that doesn't have
 * any additional restrictions on the pitch can directly use this function as
 * their &drm_driver.dumb_create callback.
 *
 * For hardware with additional restrictions, drivers can adjust the fields
 * set up by userspace before calling into this function.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
			      struct drm_mode_create_dumb *args)
{
	u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	struct drm_gem_shmem_object *shmem;

	if (!args->pitch || !args->size) {
		args->pitch = min_pitch;
		args->size = args->pitch * args->height;
	} else {
		/* ensure sane minimum values */
		if (args->pitch < min_pitch)
			args->pitch = min_pitch;
		if (args->size < args->pitch * args->height)
			args->size = args->pitch * args->height;
	}

	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);

	return PTR_ERR_OR_ZERO(shmem);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
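
/*
 * Worked example: a DRM_IOCTL_MODE_CREATE_DUMB request with width=1024,
 * height=768 and bpp=16 gives min_pitch = DIV_ROUND_UP(1024 * 16, 8) = 2048
 * bytes, so an all-zero pitch/size from userspace is filled in as
 * pitch = 2048 and size = 2048 * 768 = 1572864 bytes before the object is
 * created and its handle returned in args->handle.
 */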

static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	loff_t num_pages = obj->size >> PAGE_SHIFT;
	struct page *page;

	if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
		return VM_FAULT_SIGBUS;

	page = shmem->pages[vmf->pgoff];

	return vmf_insert_page(vma, vmf->address, page);
}

static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	int ret;

	WARN_ON(shmem->base.import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	WARN_ON_ONCE(ret != 0);

	drm_gem_vm_open(vma);
}

static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_gem_shmem_put_pages(shmem);
	drm_gem_vm_close(vma);
}

static const struct vm_operations_struct drm_gem_shmem_vm_ops = {
	.fault = drm_gem_shmem_fault,
	.open = drm_gem_shmem_vm_open,
	.close = drm_gem_shmem_vm_close,
};

/**
 * drm_gem_shmem_mmap - Memory-map a shmem GEM object
 * @obj: GEM object
 * @vma: VMA for the area to be mapped
 *
 * This function implements an augmented version of the GEM DRM file mmap
 * operation for shmem objects. Drivers which employ the shmem helpers should
 * use this function as their &drm_gem_object_funcs.mmap handler.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_gem_shmem_object *shmem;
	int ret;

	/* Remove the fake offset */
	vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);

	if (obj->import_attach)
		return dma_buf_mmap(obj->dma_buf, vma, 0);

	shmem = to_drm_gem_shmem_obj(obj);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret) {
		drm_gem_vm_close(vma);
		return ret;
	}

	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	if (!shmem->map_cached)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	vma->vm_ops = &drm_gem_shmem_vm_ops;

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);

/**
 * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
 * @p: DRM printer
 * @indent: Tab indentation level
 * @obj: GEM object
 *
 * This implements the &drm_gem_object_funcs.print_info callback.
 */
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
			      const struct drm_gem_object *obj)
{
	const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
	drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
	drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
}
EXPORT_SYMBOL(drm_gem_shmem_print_info);

/**
 * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
 *                              pages for a shmem GEM object
 * @obj: GEM object
 *
 * This function exports a scatter/gather table suitable for PRIME usage by
 * calling the standard DMA mapping API. Drivers should not call this function
 * directly, instead it should only be used as an implementation for
 * &drm_gem_object_funcs.get_sg_table.
 *
 * Drivers that need to acquire a scatter/gather table for objects should call
 * drm_gem_shmem_get_pages_sgt() instead.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
 */
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

	WARN_ON(shmem->base.import_attach);

	return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);

/**
 * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
 *                               scatter/gather table for a shmem GEM object
 * @obj: GEM object
 *
 * This function returns a scatter/gather table suitable for driver usage. If
 * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
 * table created.
 *
 * This is the main function for drivers to get at backing storage, and it hides
 * any difference between dma-buf imported and natively allocated objects.
 * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
 *
 * Returns:
 * A pointer to the scatter/gather table of pinned pages or errno on failure.
 */
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
{
	int ret;
	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
	struct sg_table *sgt;

	if (shmem->sgt)
		return shmem->sgt;

	WARN_ON(obj->import_attach);

	ret = drm_gem_shmem_get_pages(shmem);
	if (ret)
		return ERR_PTR(ret);

	sgt = drm_gem_shmem_get_sg_table(&shmem->base);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_put_pages;
	}
	/* Map the pages for use by the h/w. */
	dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);

	shmem->sgt = sgt;

	return sgt;

err_put_pages:
	drm_gem_shmem_put_pages(shmem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
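
/*
 * Example (sketch): a driver fetching the dma-mapped scatterlist before
 * programming its DMA engine. The for_each_sg() walk is standard; the
 * mydrv_program_dma() call is hypothetical. The sg_table stays owned by the
 * helper and is released in drm_gem_shmem_free_object():
 *
 *	struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);
 *	struct scatterlist *sg;
 *	int i;
 *
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *	for_each_sg(sgt->sgl, sg, sgt->nents, i)
 *		mydrv_program_dma(mydrv, sg_dma_address(sg), sg_dma_len(sg));
 */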

/**
 * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
 *                 another driver's scatter/gather table of pinned pages
 * @dev: Device to import into
 * @attach: DMA-BUF attachment
 * @sgt: Scatter/gather table of pinned pages
 *
 * This function imports a scatter/gather table exported via DMA-BUF by
 * another driver. Drivers that use the shmem helpers should set this as their
 * &drm_driver.gem_prime_import_sg_table callback.
 *
 * Returns:
 * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
 * error code on failure.
 */
struct drm_gem_object *
drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
				    struct dma_buf_attachment *attach,
				    struct sg_table *sgt)
{
	size_t size = PAGE_ALIGN(attach->dmabuf->size);
	struct drm_gem_shmem_object *shmem;

	shmem = __drm_gem_shmem_create(dev, size, true);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	shmem->sgt = sgt;

	DRM_DEBUG_PRIME("size = %zu\n", size);

	return &shmem->base;
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);