// SPDX-License-Identifier: GPL-2.0-only
 * Framework for buffer objects that can be shared across devices/subsystems.
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>
static inline int is_dma_buf_file(struct file *);
	struct list_head head;
static struct dma_buf_list db_list;
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	spin_unlock(&dmabuf->name_lock);
	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
static void dma_buf_release(struct dentry *dentry)
	struct dma_buf *dmabuf;
	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
	BUG_ON(dmabuf->vmapping_counter);
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing the dma-buf. This is the responsibility of each
	 * driver that uses the reservation objects.
	 * If you hit this BUG() it means someone dropped their ref to the
	 * dma-buf while still having pending operations on the buffer.
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
	dmabuf->ops->release(dmabuf);
	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);
	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);
	module_put(dmabuf->owner);
static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
static struct vfsmount *dma_buf_mnt;
static int dma_buf_fs_init_context(struct fs_context *fc)
	struct pseudo_fs_context *ctx;
	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	ctx->dops = &dma_buf_dentry_ops;
static struct file_system_type dma_buf_fs_type = {
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
	struct dma_buf *dmabuf;
	if (!is_dma_buf_file(file))
	dmabuf = file->private_data;
	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
	return dmabuf->ops->mmap(dmabuf, vma);
static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
	struct dma_buf *dmabuf;
	if (!is_dma_buf_file(file))
	dmabuf = file->private_data;
	/* only support discovering the end of the buffer,
	   but also allow SEEK_SET to maintain the idiomatic
	   SEEK_END(0), SEEK_CUR(0) pattern */
	if (whence == SEEK_END)
	else if (whence == SEEK_SET)
	return base + offset;
 * DOC: implicit fence polling
 * To support cross-device and cross-driver synchronization of buffer access,
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
 *   most recent write or exclusive fence.
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
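/*
 * Illustrative userspace sketch (an assumption for documentation purposes,
 * not part of this file): waiting for all implicitly attached fences by
 * polling the dma-buf fd for POLLOUT. The fd is assumed to have been
 * obtained from an exporter beforehand; error handling is minimal.
 *
 *	#include <poll.h>
 *
 *	static int wait_for_buffer_idle(int dmabuf_fd)
 *	{
 *		struct pollfd pfd = {
 *			.fd = dmabuf_fd,
 *			.events = POLLOUT,	// all fences, shared and exclusive
 *		};
 *
 *		// block until every attached fence has signalled
 *		return poll(&pfd, 1, -1) == 1 ? 0 : -1;
 *	}
 */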
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	struct dma_resv_list *fobj;
	struct dma_fence *fence_excl;
	unsigned shared_count, seq;
	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
	poll_wait(file, &dmabuf->poll, poll);
	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	seq = read_seqcount_begin(&resv->seq);
	fobj = rcu_dereference(resv->fence);
		shared_count = fobj->shared_count;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		__poll_t pevents = EPOLLIN;
		if (shared_count == 0)
		spin_lock_irq(&dmabuf->poll.lock);
			dcb->active |= pevents;
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);
		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				/* force a recheck */
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
				dma_fence_put(fence_excl);
				 * No callback queued, wake up any additional waiters.
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
	if ((events & EPOLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);
		if (!(events & EPOLLOUT))
		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
			if (!dma_fence_get_rcu(fence)) {
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 * call dma_buf_poll_cb and force a recheck!
				dma_buf_poll_cb(NULL, &dcb->cb);
			if (!dma_fence_add_callback(fence, &dcb->cb,
				dma_fence_put(fence);
			dma_fence_put(fence);
		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
 * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
 * The name of the dma-buf buffer can only be set when the dma-buf is not
 * attached to any devices. It could theoretically support changing the
 * name of the dma-buf if the same piece of memory is used for multiple
 * purposes between different devices.
 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 * @buf:    [in]     A piece of userspace memory that contains the name of
 *                   the dma-buf.
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, returns -EBUSY.
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
		return PTR_ERR(name);
	dma_resv_lock(dmabuf->resv, NULL);
	if (!list_empty(&dmabuf->attachments)) {
	spin_lock(&dmabuf->name_lock);
	spin_unlock(&dmabuf->name_lock);
	dma_resv_unlock(dmabuf->resv);
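/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * naming a dma-buf through the DMA_BUF_SET_NAME ioctl. This only succeeds
 * while the buffer has no device attachments, otherwise -EBUSY is returned.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-buf.h>
 *
 *	static int name_dmabuf(int dmabuf_fd, const char *name)
 *	{
 *		// must be called before the buffer is attached to any device
 *		return ioctl(dmabuf_fd, DMA_BUF_SET_NAME, name);
 *	}
 */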
static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	dmabuf = file->private_data;
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
			ret = dma_buf_begin_cpu_access(dmabuf, direction);
	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);
static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
	struct dma_buf *dmabuf = file->private_data;
	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
static const struct file_operations dma_buf_fops = {
	.mmap = dma_buf_mmap_internal,
	.llseek = dma_buf_llseek,
	.poll = dma_buf_poll,
	.unlocked_ioctl = dma_buf_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.show_fdinfo = dma_buf_show_fdinfo,
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
static inline int is_dma_buf_file(struct file *file)
	return file->f_op == &dma_buf_fops;
static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
		return ERR_CAST(inode);
	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);
	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;
 * DOC: dma buf device access
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 * 3. Once the buffer is attached to all devices userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops.
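/*
 * Illustrative importer-side sketch of the sequence above (assumptions: a
 * valid dma-buf fd in @fd, an importing struct device in @dev; error
 * handling is omitted and a real driver would keep the attachment and
 * mapping around instead of tearing them down immediately):
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);			// step 2
 *	struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
 *	struct sg_table *sgt;
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);	// step 3
 *
 *	// ... program the device with the DMA addresses/lengths in sgt ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);	// step 4
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */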
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * It also connects the allocator-specific data and ops to the buffer.
 * Additionally, it provides a name string for the exporter; useful in debugging.
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. See &struct dma_buf_export_info
 *			for further details.
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On either
 * missing ops or an error in allocating struct dma_buf, a negative error is
 * returned.
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	size_t alloc_size = sizeof(struct dma_buf);
		alloc_size += sizeof(struct dma_resv);
		/* prevent &dma_buf[1] == dma_buf->resv */
	if (WARN_ON(!exp_info->priv
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release)) {
		return ERR_PTR(-EINVAL);
	if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
		    (exp_info->ops->pin || exp_info->ops->unpin)))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);
	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);
	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
		resv = (struct dma_resv *)&dmabuf[1];
	file = dma_buf_getfile(dmabuf, exp_info->flags);
	file->f_mode |= FMODE_LSEEK;
	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);
	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);
	module_put(exp_info->owner);
EXPORT_SYMBOL_GPL(dma_buf_export);
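/*
 * Illustrative exporter-side sketch (my_buffer, my_dmabuf_ops and the
 * O_CLOEXEC choice are assumptions, not part of this file): wrap a
 * driver-private buffer into a dma-buf and hand a file descriptor to
 * userspace.
 *
 *	static int my_export_fd(struct my_buffer *buf)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *		struct dma_buf *dmabuf;
 *
 *		exp_info.ops = &my_dmabuf_ops;	// must provide map, unmap and release
 *		exp_info.size = buf->size;
 *		exp_info.flags = O_RDWR;
 *		exp_info.priv = buf;
 *
 *		dmabuf = dma_buf_export(&exp_info);
 *		if (IS_ERR(dmabuf))
 *			return PTR_ERR(dmabuf);
 *
 *		return dma_buf_fd(dmabuf, O_CLOEXEC);
 *	}
 */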
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 * On success, returns an associated 'fd'. Else, returns error.
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
	if (!dmabuf || !dmabuf->file)
	fd = get_unused_fd_flags(flags);
	fd_install(fd, dmabuf->file);
EXPORT_SYMBOL_GPL(dma_buf_fd);
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd:	[in]	fd associated with the struct dma_buf to be returned
 * On success, returns the struct dma_buf associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
struct dma_buf *dma_buf_get(int fd)
		return ERR_PTR(-EBADF);
	if (!is_dma_buf_file(file)) {
		return ERR_PTR(-EINVAL);
	return file->private_data;
EXPORT_SYMBOL_GPL(dma_buf_get);
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 * Uses file's refcounting done implicitly by fput().
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
void dma_buf_put(struct dma_buf *dmabuf)
	if (WARN_ON(!dmabuf || !dmabuf->file))
EXPORT_SYMBOL_GPL(dma_buf_put);
 * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
 * @dmabuf:		[in]	buffer to attach device to.
 * @dev:		[in]	device to be attached.
 * @importer_ops:	[in]	importer operations for the attachment
 * @importer_priv:	[in]	importer private pointer for the attachment
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
 * functionality.
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
		       const struct dma_buf_attach_ops *importer_ops,
	struct dma_buf_attachment *attach;
	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);
	if (WARN_ON(importer_ops && !importer_ops->move_notify))
		return ERR_PTR(-EINVAL);
	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);
	attach->dmabuf = dmabuf;
		attach->peer2peer = importer_ops->allow_peer2peer;
	attach->importer_ops = importer_ops;
	attach->importer_priv = importer_priv;
	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
	dma_resv_lock(dmabuf->resv, NULL);
	list_add(&attach->node, &dmabuf->attachments);
	dma_resv_unlock(dmabuf->resv);
	/* When either the importer or the exporter can't handle dynamic
	 * mappings we cache the mapping here to avoid issues with the
	 * reservation object lock.
	if (dma_buf_attachment_is_dynamic(attach) !=
	    dma_buf_is_dynamic(dmabuf)) {
		struct sg_table *sgt;
		if (dma_buf_is_dynamic(attach->dmabuf)) {
			dma_resv_lock(attach->dmabuf->resv, NULL);
			ret = dma_buf_pin(attach);
		sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
			sgt = ERR_PTR(-ENOMEM);
		if (dma_buf_is_dynamic(attach->dmabuf))
			dma_resv_unlock(attach->dmabuf->resv);
		attach->dir = DMA_BIDIRECTIONAL;
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_buf_unpin(attach);
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_unlock(attach->dmabuf->resv);
	dma_buf_detach(dmabuf, attach);
EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
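/*
 * Illustrative sketch of a dynamic importer (my_move_notify, my_importer_ops
 * and my_importer_priv are assumptions, not part of this file): the importer
 * supplies a move_notify callback so the exporter may relocate the backing
 * storage while attachments exist.
 *
 *	static void my_move_notify(struct dma_buf_attachment *attach)
 *	{
 *		// invalidate any cached mapping; remap on next use
 *	}
 *
 *	static const struct dma_buf_attach_ops my_importer_ops = {
 *		.allow_peer2peer = true,
 *		.move_notify = my_move_notify,
 *	};
 *
 *	attach = dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops,
 *					my_importer_priv);
 */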
 * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
 * mapping.
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
	return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
EXPORT_SYMBOL_GPL(dma_buf_attach);
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is freed after this call.
 * Clean up a device attachment obtained by calling dma_buf_attach().
 * Optionally this calls &dma_buf_ops.detach for device-specific detach.
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
	if (WARN_ON(!dmabuf || !attach))
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_lock(attach->dmabuf->resv, NULL);
	dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
	if (dma_buf_is_dynamic(attach->dmabuf)) {
		dma_buf_unpin(attach);
		dma_resv_unlock(attach->dmabuf->resv);
	dma_resv_lock(dmabuf->resv, NULL);
	list_del(&attach->node);
	dma_resv_unlock(dmabuf->resv);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);
EXPORT_SYMBOL_GPL(dma_buf_detach);
 * dma_buf_pin - Lock down the DMA-buf
 * @attach:	[in]	attachment which should be pinned
 * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
 * call this, and only for limited use cases like scanout and not for temporary
 * pin operations. It is not permitted to allow userspace to pin arbitrary
 * amounts of buffers through this interface.
 * Buffers must be unpinned by calling dma_buf_unpin().
 * 0 on success, negative error code on failure.
int dma_buf_pin(struct dma_buf_attachment *attach)
	struct dma_buf *dmabuf = attach->dmabuf;
	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
	dma_resv_assert_held(dmabuf->resv);
	if (dmabuf->ops->pin)
		ret = dmabuf->ops->pin(attach);
EXPORT_SYMBOL_GPL(dma_buf_pin);
 * dma_buf_unpin - Unpin a DMA-buf
 * @attach:	[in]	attachment which should be unpinned
 * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
 * any mapping of @attach again and inform the importer through
 * &dma_buf_attach_ops.move_notify.
void dma_buf_unpin(struct dma_buf_attachment *attach)
	struct dma_buf *dmabuf = attach->dmabuf;
	WARN_ON(!dma_buf_attachment_is_dynamic(attach));
	dma_resv_assert_held(dmabuf->resv);
	if (dmabuf->ops->unpin)
		dmabuf->ops->unpin(attach);
EXPORT_SYMBOL_GPL(dma_buf_unpin);
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 * On success, the DMA addresses and lengths in the returned scatterlist are
 * PAGE_SIZE aligned.
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
	struct sg_table *sg_table;
	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);
	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);
	if (dma_buf_is_dynamic(attach->dmabuf)) {
		dma_resv_assert_held(attach->dmabuf->resv);
		if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
			r = dma_buf_pin(attach);
	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
		sg_table = ERR_PTR(-ENOMEM);
	if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
	     !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);
	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
#ifdef CONFIG_DMA_API_DEBUG
	if (!IS_ERR(sg_table)) {
		struct scatterlist *sg;
		for_each_sgtable_dma_sg(sg_table, sg, i) {
			addr = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
				pr_debug("%s: addr %llx or len %x is not page aligned!\n",
					 __func__, addr, len);
#endif /* CONFIG_DMA_API_DEBUG */
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
 * the dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
			      struct sg_table *sg_table,
			      enum dma_data_direction direction)
	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
	if (dma_buf_attachment_is_dynamic(attach))
		dma_resv_assert_held(attach->dmabuf->resv);
	if (attach->sgt == sg_table)
	if (dma_buf_is_dynamic(attach->dmabuf))
		dma_resv_assert_held(attach->dmabuf->resv);
	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
	if (dma_buf_is_dynamic(attach->dmabuf) &&
	    !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
		dma_buf_unpin(attach);
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
 * dma_buf_move_notify - notify attachments that DMA-buf is moving
 * @dmabuf:	[in]	buffer which is moving
 * Informs all attachments that they need to destroy and recreate all their
 * mappings.
void dma_buf_move_notify(struct dma_buf *dmabuf)
	struct dma_buf_attachment *attach;
	dma_resv_assert_held(dmabuf->resv);
	list_for_each_entry(attach, &dmabuf->attachments, node)
		if (attach->importer_ops)
			attach->importer_ops->move_notify(attach);
EXPORT_SYMBOL_GPL(dma_buf_move_notify);
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *   Since most kernel-internal dma-buf accesses need the entire buffer, a
 *   vmap interface is introduced. Note that on very old 32-bit architectures
 *   vmalloc space might be limited and result in vmap calls failing.
 *     void \*dma_buf_vmap(struct dma_buf \*dmabuf)
 *     void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
 *   count for all vmap access and calls down into the exporter's vmap function
 *   only when no vmapping exists, and only unmaps it once. Protection against
 *   concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this, and mmap support was needed for DMA buffer
 *   file descriptors to replace ION buffers.
 *   There are no special interfaces; userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like the following (an illustrative userspace sketch
 *   follows this section):
 *   - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *     to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *     want (with the new data being consumed by say the GPU or the scanout
 *     device)
 *   - munmap once you don't need the buffer any more
 *   For correctness and optimal performance, it is always required to use
 *   SYNC_START and SYNC_END before and after, respectively, when accessing the
 *   mapped address. Userspace cannot rely on coherent access, even when there
 *   are systems where it just works without calling these ioctls.
 * - And as a CPU fallback in userspace processing pipelines.
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *     int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *                      unsigned long);
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with &dma_buf.file will
 *   equally achieve that for a dma-buf object.
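/*
 * Illustrative userspace sketch of the mmap + DMA_BUF_IOCTL_SYNC sequence
 * described above (an assumption, not part of this file; the -EAGAIN/-EINTR
 * restart handling mentioned above is omitted for brevity):
 *
 *	#include <sys/mman.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-buf.h>
 *
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 dmabuf_fd, 0);
 *	struct dma_buf_sync sync;
 *
 *	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// SYNC_START
 *
 *	// ... CPU reads/writes through ptr ...
 *
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// SYNC_END
 *
 *	munmap(ptr, size);
 */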
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct dma_resv *resv = dmabuf->resv;
	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout_rcu(resv, write, true,
					MAX_SCHEDULE_TIMEOUT);
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of cpu access.
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 * This function will also wait for any DMA transactions tracked through
 * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
 * synchronization this function will only ensure cache coherency, callers must
 * ensure synchronization with such DMA transactions on their own.
 * Can return negative error values, returns 0 on success.
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
	if (WARN_ON(!dmabuf))
	might_lock(&dmabuf->resv->lock.base);
	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed in the specified range for the
 * specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of cpu access.
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 * Can return negative error values, returns 0 on success.
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
	might_lock(&dmabuf->resv->lock.base);
	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
 * dma_buf_mmap - Set up a userspace mmap with the given vma
 * @dmabuf:	[in]	buffer that should back the vma
 * @vma:	[in]	vma for the mmap
 * @pgoff:	[in]	offset in pages where this mmap should start within the
 *			dma-buf buffer.
 * This function adjusts the passed in vma so that it points at the file of the
 * dma_buf operation. It also adjusts the starting pgoff and does bounds
 * checking on the size of the vma. Then it calls the exporter's mmap function to
 * set up the mapping.
 * Can return negative error values, returns 0 on success.
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
		 unsigned long pgoff)
	if (WARN_ON(!dmabuf || !vma))
	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
	/* check for offset overflow */
	if (pgoff + vma_pages(vma) < pgoff)
	/* check for overflowing the buffer's size */
	if (pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
	/* readjust the vma */
	vma_set_file(vma, dmabuf->file);
	vma->vm_pgoff = pgoff;
	return dmabuf->ops->mmap(dmabuf, vma);
EXPORT_SYMBOL_GPL(dma_buf_mmap);
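/*
 * Illustrative sketch (an assumption, not part of this file) of an importing
 * driver redirecting its own mmap file operation to the dma-buf as described
 * above; my_object_from_file() is a hypothetical lookup helper.
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_object *obj = my_object_from_file(file);
 *
 *		// forward the whole mapping to the dma-buf, starting at page 0
 *		return dma_buf_mmap(obj->dmabuf, vma, 0);
 *	}
 */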
 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
 * address space. Same restrictions as for vmap and friends apply.
 * @dmabuf:	[in]	buffer to vmap
 * @map:	[out]	returns the vmap pointer
 * This call may fail due to lack of virtual mapping address space.
 * These calls are optional in drivers. The intended use for them
 * is for mapping objects linearly in kernel space for high-use objects.
 * To ensure coherency users must call dma_buf_begin_cpu_access() and
 * dma_buf_end_cpu_access() around any cpu access performed through this
 * mapping.
 * Returns 0 on success, or a negative errno code otherwise.
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
	struct dma_buf_map ptr;
	dma_buf_map_clear(map);
	if (WARN_ON(!dmabuf))
	if (!dmabuf->ops->vmap)
	mutex_lock(&dmabuf->lock);
	if (dmabuf->vmapping_counter) {
		dmabuf->vmapping_counter++;
		BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
		*map = dmabuf->vmap_ptr;
	BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));
	ret = dmabuf->ops->vmap(dmabuf, &ptr);
	if (WARN_ON_ONCE(ret))
	dmabuf->vmap_ptr = ptr;
	dmabuf->vmapping_counter = 1;
	*map = dmabuf->vmap_ptr;
	mutex_unlock(&dmabuf->lock);
EXPORT_SYMBOL_GPL(dma_buf_vmap);
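/*
 * Illustrative kernel-side sketch (an assumption, not part of this file):
 * CPU access through a vmap, bracketed by begin/end_cpu_access for
 * coherency as required by the kerneldoc above; error handling is minimal.
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = dma_buf_vmap(dmabuf, &map);
 *	if (ret)
 *		return ret;
 *
 *	dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	// ... read buffer contents through map.vaddr (or map.vaddr_iomem) ...
 *	dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *
 *	dma_buf_vunmap(dmabuf, &map);
 */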
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf:	[in]	buffer to vunmap
 * @map:	[in]	vmap pointer to vunmap
void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
	if (WARN_ON(!dmabuf))
	BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
	BUG_ON(dmabuf->vmapping_counter == 0);
	BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));
	mutex_lock(&dmabuf->lock);
	if (--dmabuf->vmapping_counter == 0) {
		if (dmabuf->ops->vunmap)
			dmabuf->ops->vunmap(dmabuf, map);
		dma_buf_map_clear(&dmabuf->vmap_ptr);
	mutex_unlock(&dmabuf->lock);
EXPORT_SYMBOL_GPL(dma_buf_vunmap);
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
	struct dma_buf *buf_obj;
	struct dma_buf_attachment *attach_obj;
	struct dma_resv *robj;
	struct dma_resv_list *fobj;
	struct dma_fence *fence;
	int count = 0, attach_count, shared_count, i;
	ret = mutex_lock_interruptible(&db_list.lock);
	seq_puts(s, "\nDma-buf Objects:\n");
	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\n",
		   "size", "flags", "mode", "count", "ino");
	list_for_each_entry(buf_obj, &db_list.head, list_node) {
		ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
			   buf_obj->file->f_flags, buf_obj->file->f_mode,
			   file_count(buf_obj->file),
			   file_inode(buf_obj->file)->i_ino,
			   buf_obj->name ?: "");
		robj = buf_obj->resv;
		seq = read_seqcount_begin(&robj->seq);
		fobj = rcu_dereference(robj->fence);
		shared_count = fobj ? fobj->shared_count : 0;
		fence = rcu_dereference(robj->fence_excl);
		if (!read_seqcount_retry(&robj->seq, seq))
		seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
			   fence->ops->get_driver_name(fence),
			   fence->ops->get_timeline_name(fence),
			   dma_fence_is_signaled(fence) ? "" : "un");
		for (i = 0; i < shared_count; i++) {
			fence = rcu_dereference(fobj->shared[i]);
			if (!dma_fence_get_rcu(fence))
			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
				   fence->ops->get_driver_name(fence),
				   fence->ops->get_timeline_name(fence),
				   dma_fence_is_signaled(fence) ? "" : "un");
			dma_fence_put(fence);
		seq_puts(s, "\tAttached Devices:\n");
		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
		dma_resv_unlock(buf_obj->resv);
		seq_printf(s, "Total %d devices attached\n\n",
		size += buf_obj->size;
	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
	mutex_unlock(&db_list.lock);
	mutex_unlock(&db_list.lock);
DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
static struct dentry *dma_buf_debugfs_dir;
static int dma_buf_init_debugfs(void)
	d = debugfs_create_dir("dma_buf", NULL);
	dma_buf_debugfs_dir = d;
	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
				NULL, &dma_buf_debug_fops);
		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
		debugfs_remove_recursive(dma_buf_debugfs_dir);
		dma_buf_debugfs_dir = NULL;
static void dma_buf_uninit_debugfs(void)
	debugfs_remove_recursive(dma_buf_debugfs_dir);
static inline int dma_buf_init_debugfs(void)
static inline void dma_buf_uninit_debugfs(void)
static int __init dma_buf_init(void)
	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
	if (IS_ERR(dma_buf_mnt))
		return PTR_ERR(dma_buf_mnt);
	mutex_init(&db_list.lock);
	INIT_LIST_HEAD(&db_list.head);
	dma_buf_init_debugfs();
subsys_initcall(dma_buf_init);
static void __exit dma_buf_deinit(void)
	dma_buf_uninit_debugfs();
	kern_unmount(dma_buf_mnt);
__exitcall(dma_buf_deinit);