/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/
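
/*
 * Walk a DMA-mapped scatterlist and return the size of its contiguous
 * prefix in the DMA address space; callers below use this to verify
 * that a user or imported buffer really maps to one contiguous chunk.
 */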
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	/* For DMABUF imports, create the kernel mapping lazily on demand */
	if (!buf->vaddr && buf->db_attach)
		buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (!sgt || buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->sgt_base) {
		sg_free_table(buf->sgt_base);
		kfree(buf->sgt_base);
	}
	dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
		       buf->attrs);
	put_device(buf->dev);
	kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
			  unsigned long size, enum dma_data_direction dma_dir,
			  gfp_t gfp_flags)
{
	struct vb2_dc_buf *buf;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->attrs = attrs;
	buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
				      GFP_KERNEL | gfp_flags, buf->attrs);
	if (!buf->cookie) {
		dev_err(dev, "dma_alloc_attrs of size %ld failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
		buf->vaddr = buf->cookie;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);
	buf->size = size;
	buf->dma_dir = dma_dir;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No buffer to map\n");
		return -EINVAL;
	}

	ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
			     buf->dma_addr, buf->size, buf->attrs);
	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vma->vm_flags		|= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		/*
		 * Cache sync can be skipped here, as the vb2_dc memory is
		 * allocated from device coherent memory, which means the
		 * memory locations do not require any explicit cache
		 * maintenance prior or after being used by the device.
		 */
		dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
				   attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
				   attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		attach->dma_dir = DMA_NONE;
	}

	/*
	 * Map the scatterlist for the client with the new direction; no cache
	 * sync is required, see the comment in vb2_dc_dmabuf_ops_detach().
	 */
	sgt->nents = dma_map_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
				      dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}
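
/*
 * Exporter-side dispatch table. The dma-buf core invokes these hooks on
 * behalf of importers of a buffer exported by vb2_dc_get_dmabuf() below;
 * each attachment carries its own scatterlist copy (see
 * vb2_dc_dmabuf_ops_attach()), so several devices can map the same
 * buffer at the same time.
 */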
static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
				    buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = dma_dir;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_pfnvec;
		}
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
		offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (sgt->nents <= 0) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
		       contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
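
/*
 * Usage sketch (illustrative, not part of this file): a capture driver
 * selects this allocator by pointing its vb2_queue at the ops table
 * above during queue setup; the queue and platform device below are
 * hypothetical.
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	q->dev = &pdev->dev;
 *	ret = vb2_queue_init(q);
 */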

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 *
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 *
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 *
 * This function should be called from the drivers, which are known to
 * operate on platforms with IOMMU and provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
		return -ENODEV;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
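
/*
 * Usage sketch (illustrative): on an IOMMU-capable platform a driver
 * would typically raise the limit from its probe() routine, before the
 * vb2 queue is initialized; the device pointer is hypothetical and
 * dev->dma_parms must already be set up by the bus or the driver.
 *
 *	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */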

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");