/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

struct vb2_dma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	struct frame_vector		*vec;
	int				offset;
	enum dma_data_direction	dma_dir;
	struct sg_table			sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;

	struct dma_buf_attachment	*db_attach;

	struct vb2_buffer		*vb;
};

static void vb2_dma_sg_put(void *buf_priv);
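
/*
 * Allocate buf->size bytes worth of pages for an MMAP buffer. The allocator
 * tries the largest page order that still fits the remaining size and drops
 * to smaller orders on failure, so the resulting scatterlist contains as few
 * (and as large) chunks as the page allocator can provide.
 */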
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	unsigned long size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;
			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];
		size -= PAGE_SIZE << order;
	}
	return 0;
}
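
/*
 * MMAP memop: allocate the pages, wrap them in an sg_table and map that
 * table for DMA with DMA_ATTR_SKIP_CPU_SYNC; the actual cache sync is
 * deferred to the prepare()/finish() memops.
 */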
static void *vb2_dma_sg_alloc(struct vb2_buffer *vb, struct device *dev,
			      unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;

	if (WARN_ON(!dev) || WARN_ON(!size))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	/*
	 * NOTE: dma-sg allocates memory using the page allocator directly, so
	 * there is no memory consistency guarantee, hence dma-sg ignores DMA
	 * attributes passed from the upper layer.
	 */
	buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
				    GFP_KERNEL | __GFP_ZERO);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;
	buf->vb = vb;

	refcount_set(&buf->refcount, 1);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kvfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (refcount_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kvfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}
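
/*
 * prepare() syncs the pages for the device before the driver starts DMA and
 * finish() syncs them back for the CPU once DMA is done. Both are no-ops
 * when the corresponding skip_cache_sync flag is set on the buffer.
 */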
static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (buf->vb->skip_cache_sync_on_prepare)
		return;

	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (buf->vb->skip_cache_sync_on_finish)
		return;

	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
				    unsigned long vaddr, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	struct frame_vector *vec;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->vaddr = NULL;
	buf->dev = dev;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	buf->vb = vb;

	vec = vb2_create_framevec(vaddr, size);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC))
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	if (buf->dma_dir == DMA_FROM_DEVICE ||
	    buf->dma_dir == DMA_BIDIRECTIONAL)
		while (--i >= 0)
			set_page_dirty_lock(buf->pages[i]);
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf_map map;
	int ret;

	if (!buf->vaddr) {
		if (buf->db_attach) {
			ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
			buf->vaddr = ret ? NULL : map.vaddr;
		} else {
			buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
		}
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int err;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	err = vm_map_pages(vma, buf->pages, buf->num_pages);
	if (err) {
		printk(KERN_ERR "Remapping memory, error: %d\n", err);
		return err;
	}

	/* Use common vm_area operations to track buffer refcount. */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;
	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/
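
/*
 * Each attachment gets its own copy of the buffer's scatterlist together
 * with the direction it is currently mapped for, so several importers can
 * map the buffer independently and a repeated map in the same direction
 * simply returns the cached table.
 */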

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;
	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;
	mutex_unlock(lock);

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static int
vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				       enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int
vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				     enum dma_data_direction direction)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	struct sg_table *sgt = buf->dma_sgt;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
	return 0;
}

static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	dma_buf_map_set_vaddr(map, buf->vaddr);
	return 0;
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};

static struct dma_buf *vb2_dma_sg_get_dmabuf(struct vb2_buffer *vb,
					     void *buf_priv,
					     unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/
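
/*
 * Importer side: attach_dmabuf() creates the attachment with the consumer
 * device, map_dmabuf() pins the buffer and fetches the exporter's sg_table
 * via dma_buf_map_attachment(), and unmap/detach undo those steps.
 */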
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				      struct dma_buf *dbuf, unsigned long size)
{
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;
	buf->db_attach = dba;
	buf->vb = vb;

	return buf;
}

static void *vb2_dma_sg_cookie(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc = vb2_dma_sg_alloc,
	.put = vb2_dma_sg_put,
	.get_userptr = vb2_dma_sg_get_userptr,
	.put_userptr = vb2_dma_sg_put_userptr,
	.prepare = vb2_dma_sg_prepare,
	.finish = vb2_dma_sg_finish,
	.vaddr = vb2_dma_sg_vaddr,
	.mmap = vb2_dma_sg_mmap,
	.num_users = vb2_dma_sg_num_users,
	.get_dmabuf = vb2_dma_sg_get_dmabuf,
	.map_dmabuf = vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf = vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf = vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf = vb2_dma_sg_detach_dmabuf,
	.cookie = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
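
/*
 * Typical driver usage (an illustrative sketch, not part of this file): a
 * driver points its vb2_queue at these memops and later retrieves the
 * per-plane scatterlist with vb2_dma_sg_plane_desc() to program its DMA
 * engine. The names my_dev and my_queue_setup below are made up for the
 * example; the queue also needs the usual fields (type, ops,
 * buf_struct_size, ...) filled in before vb2_queue_init().
 *
 *	static int my_queue_setup(struct my_dev *dev, struct vb2_queue *q)
 *	{
 *		q->dev = dev->dev;
 *		q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *		q->mem_ops = &vb2_dma_sg_memops;
 *		return vb2_queue_init(q);
 *	}
 *
 *	// e.g. in buf_queue()/start_streaming():
 *	//	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
 *	//	then program the hardware from sgt->sgl
 */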

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);