/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)                                     \
        do {                                                            \
                if (debug >= level)                                     \
                        printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);  \
        } while (0)

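/*
 * vb2_dma_sg_buf - per-buffer state kept by this allocator: the backing
 * pages (or the pinned user pages / dmabuf attachment), the sg_table built
 * from them, the DMA direction, and a refcount shared with mmap()ed ranges
 * and exported dmabufs.
 */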
struct vb2_dma_sg_buf {
        struct device                   *dev;
        void                            *vaddr;
        struct page                     **pages;
        struct frame_vector             *vec;
        int                             offset;
        enum dma_data_direction         dma_dir;
        struct sg_table                 sg_table;
        /*
         * This will point to sg_table when used with the MMAP or USERPTR
         * memory model, and to the dma_buf sglist when used with the
         * DMABUF memory model.
         */
        struct sg_table                 *dma_sgt;
        size_t                          size;
        unsigned int                    num_pages;
        refcount_t                      refcount;
        struct vb2_vmarea_handler       handler;

        struct dma_buf_attachment       *db_attach;

        struct vb2_buffer               *vb;
};

static void vb2_dma_sg_put(void *buf_priv);

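/*
 * Allocate buf->size bytes worth of pages for the MMAP case.  Try the
 * highest page order that still fits the remaining size first and fall
 * back to smaller orders (down to single pages) when the allocation fails;
 * successful higher-order allocations are split so every entry of
 * buf->pages can be freed individually.  Returns 0 on success or -ENOMEM
 * after releasing whatever was already allocated.
 */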
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
                gfp_t gfp_flags)
{
        unsigned int last_page = 0;
        unsigned long size = buf->size;

        while (size > 0) {
                struct page *pages;
                int order;
                int i;

                order = get_order(size);
                /* Don't over-allocate */
                if ((PAGE_SIZE << order) > size)
                        order--;

                pages = NULL;
                while (!pages) {
                        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
                                        __GFP_NOWARN | gfp_flags, order);
                        if (pages)
                                break;

                        if (order == 0) {
                                while (last_page--)
                                        __free_page(buf->pages[last_page]);
                                return -ENOMEM;
                        }
                        order--;
                }

                split_page(pages, order);
                for (i = 0; i < (1 << order); i++)
                        buf->pages[last_page++] = &pages[i];

                size -= PAGE_SIZE << order;
        }

        return 0;
}

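/*
 * MMAP memop: allocate the pages, build an sg_table spanning them and map
 * it for DMA.  The CPU sync is skipped here; it happens later, when the
 * prepare() memop is called.
 */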
static void *vb2_dma_sg_alloc(struct vb2_buffer *vb, struct device *dev,
                              unsigned long size)
{
        struct vb2_dma_sg_buf *buf;
        struct sg_table *sgt;
        int ret;
        int num_pages;

        if (WARN_ON(!dev) || WARN_ON(!size))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->vaddr = NULL;
        buf->dma_dir = vb->vb2_queue->dma_dir;
        buf->offset = 0;
        buf->size = size;
        /* size is already page aligned */
        buf->num_pages = size >> PAGE_SHIFT;
        buf->dma_sgt = &buf->sg_table;

        /*
         * NOTE: dma-sg allocates memory using the page allocator directly, so
         * there is no memory consistency guarantee, hence dma-sg ignores DMA
         * attributes passed from the upper layer.
         */
        buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *),
                                    GFP_KERNEL | __GFP_ZERO);
        if (!buf->pages)
                goto fail_pages_array_alloc;

        ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags);
        if (ret)
                goto fail_pages_alloc;

        ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
                        buf->num_pages, 0, size, GFP_KERNEL);
        if (ret)
                goto fail_table_alloc;

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);

        sgt = &buf->sg_table;
        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
                            DMA_ATTR_SKIP_CPU_SYNC))
                goto fail_map;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dma_sg_put;
        buf->handler.arg = buf;
        buf->vb = vb;

        refcount_set(&buf->refcount, 1);

        dprintk(1, "%s: Allocated buffer of %d pages\n",
                __func__, buf->num_pages);
        return buf;

fail_map:
        put_device(buf->dev);
        sg_free_table(buf->dma_sgt);
fail_table_alloc:
        num_pages = buf->num_pages;
        while (num_pages--)
                __free_page(buf->pages[num_pages]);
fail_pages_alloc:
        kvfree(buf->pages);
fail_pages_array_alloc:
        kfree(buf);
        return ERR_PTR(-ENOMEM);
}

static void vb2_dma_sg_put(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = &buf->sg_table;
        int i = buf->num_pages;

        if (refcount_dec_and_test(&buf->refcount)) {
                dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
                        buf->num_pages);
                dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
                                  DMA_ATTR_SKIP_CPU_SYNC);
                if (buf->vaddr)
                        vm_unmap_ram(buf->vaddr, buf->num_pages);
                sg_free_table(buf->dma_sgt);
                while (--i >= 0)
                        __free_page(buf->pages[i]);
                kvfree(buf->pages);
                put_device(buf->dev);
                kfree(buf);
        }
}

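/*
 * Cache synchronisation memops: prepare() syncs the buffer for the device
 * before it is queued, finish() hands it back to the CPU afterwards.  Both
 * are no-ops when the buffer asks for cache sync to be skipped.
 */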
static void vb2_dma_sg_prepare(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (buf->vb->skip_cache_sync_on_prepare)
                return;

        dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (buf->vb->skip_cache_sync_on_finish)
                return;

        dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

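/*
 * USERPTR memop: pin the userspace pages behind @vaddr with a frame
 * vector, build an sg_table covering them (taking the sub-page offset into
 * account) and map it for DMA, again deferring the CPU sync to prepare().
 */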
static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
                                    unsigned long vaddr, unsigned long size)
{
        struct vb2_dma_sg_buf *buf;
        struct sg_table *sgt;
        struct frame_vector *vec;

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->vaddr = NULL;
        buf->dev = dev;
        buf->dma_dir = vb->vb2_queue->dma_dir;
        buf->offset = vaddr & ~PAGE_MASK;
        buf->size = size;
        buf->dma_sgt = &buf->sg_table;
        buf->vb = vb;
        vec = vb2_create_framevec(vaddr, size);
        if (IS_ERR(vec))
                goto userptr_fail_pfnvec;
        buf->vec = vec;

        buf->pages = frame_vector_pages(vec);
        if (IS_ERR(buf->pages))
                goto userptr_fail_sgtable;
        buf->num_pages = frame_vector_count(vec);

        if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
                        buf->num_pages, buf->offset, size, 0))
                goto userptr_fail_sgtable;

        sgt = &buf->sg_table;
        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
                            DMA_ATTR_SKIP_CPU_SYNC))
                goto userptr_fail_map;

        return buf;

userptr_fail_map:
        sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
        vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
        kfree(buf);
        return ERR_PTR(-ENOMEM);
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *               be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = &buf->sg_table;
        int i = buf->num_pages;

        dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
                __func__, buf->num_pages);
        dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (buf->vaddr)
                vm_unmap_ram(buf->vaddr, buf->num_pages);
        sg_free_table(buf->dma_sgt);
        if (buf->dma_dir == DMA_FROM_DEVICE ||
            buf->dma_dir == DMA_BIDIRECTIONAL)
                while (--i >= 0)
                        set_page_dirty_lock(buf->pages[i]);
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct dma_buf_map map;
        int ret;

        BUG_ON(!buf);

        if (!buf->vaddr) {
                if (buf->db_attach) {
                        ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
                        buf->vaddr = ret ? NULL : map.vaddr;
                } else {
                        buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
                }
        }

        /* add offset in case userptr is not page-aligned */
        return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return refcount_read(&buf->refcount);
}

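/*
 * MMAP memop: insert the buffer pages into the user VMA and install the
 * common vm_area operations so the mapping holds a reference on the buffer
 * for as long as it exists.
 */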
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        int err;

        if (!buf) {
                printk(KERN_ERR "No memory to map\n");
                return -EINVAL;
        }

        err = vm_map_pages(vma, buf->pages, buf->num_pages);
        if (err) {
                printk(KERN_ERR "Failed to remap memory, error: %d\n", err);
                return err;
        }

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

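/*
 * Exporter side: each attachment gets its own copy of the buffer's scatter
 * list, since the same sg_table cannot be mapped for several importers at
 * once.  The copy is only mapped lazily, in vb2_dma_sg_dmabuf_ops_map().
 */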
static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dma_sg_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dma_sg_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /* Copy the buf->dma_sgt scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->dma_sgt->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dma_sg_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_dma_sg_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dma_sg_get_dmabuf */
        vb2_dma_sg_put(dbuf->priv);
}

static int
vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
                                       enum dma_data_direction direction)
{
        struct vb2_dma_sg_buf *buf = dbuf->priv;
        struct sg_table *sgt = buf->dma_sgt;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
        return 0;
}

static int
vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
                                     enum dma_data_direction direction)
{
        struct vb2_dma_sg_buf *buf = dbuf->priv;
        struct sg_table *sgt = buf->dma_sgt;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
        return 0;
}

static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
        struct vb2_dma_sg_buf *buf = dbuf->priv;

        dma_buf_map_set_vaddr(map, buf->vaddr);

        return 0;
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
        .attach = vb2_dma_sg_dmabuf_ops_attach,
        .detach = vb2_dma_sg_dmabuf_ops_detach,
        .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
        .begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
        .end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
        .vmap = vb2_dma_sg_dmabuf_ops_vmap,
        .mmap = vb2_dma_sg_dmabuf_ops_mmap,
        .release = vb2_dma_sg_dmabuf_ops_release,
};

static struct dma_buf *vb2_dma_sg_get_dmabuf(struct vb2_buffer *vb,
                                             void *buf_priv,
                                             unsigned long flags)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_dma_sg_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (WARN_ON(!buf->dma_sgt))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        refcount_inc(&buf->refcount);

        return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

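/*
 * Importer side: for DMABUF buffers the scatter list is obtained from the
 * exporter via dma_buf_map_attachment() in map_dmabuf() and handed back in
 * unmap_dmabuf(); buf->dma_sgt tracks whether the buffer is currently
 * pinned.
 */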
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
        struct vb2_dma_sg_buf *buf = mem_priv;
        struct sg_table *sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non-attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        buf->dma_sgt = sgt;
        buf->vaddr = NULL;

        return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dma_sg_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;
        struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a non-attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        if (buf->vaddr) {
                dma_buf_vunmap(buf->db_attach->dmabuf, &map);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
        struct vb2_dma_sg_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach mapped buffer */
        if (WARN_ON(buf->dma_sgt))
                vb2_dma_sg_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
                                      struct dma_buf *dbuf, unsigned long size)
{
        struct vb2_dma_sg_buf *buf;
        struct dma_buf_attachment *dba;

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = vb->vb2_queue->dma_dir;
        buf->size = size;
        buf->db_attach = dba;
        buf->vb = vb;

        return buf;
}

static void *vb2_dma_sg_cookie(struct vb2_buffer *vb, void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return buf->dma_sgt;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
        .alloc          = vb2_dma_sg_alloc,
        .put            = vb2_dma_sg_put,
        .get_userptr    = vb2_dma_sg_get_userptr,
        .put_userptr    = vb2_dma_sg_put_userptr,
        .prepare        = vb2_dma_sg_prepare,
        .finish         = vb2_dma_sg_finish,
        .vaddr          = vb2_dma_sg_vaddr,
        .mmap           = vb2_dma_sg_mmap,
        .num_users      = vb2_dma_sg_num_users,
        .get_dmabuf     = vb2_dma_sg_get_dmabuf,
        .map_dmabuf     = vb2_dma_sg_map_dmabuf,
        .unmap_dmabuf   = vb2_dma_sg_unmap_dmabuf,
        .attach_dmabuf  = vb2_dma_sg_attach_dmabuf,
        .detach_dmabuf  = vb2_dma_sg_detach_dmabuf,
        .cookie         = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
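
/*
 * Typical usage (sketch, not taken from this file): a capture driver picks
 * this allocator when initializing its vb2_queue, for example
 *
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	q->dev = dev;		// device the scatterlist gets mapped for
 *
 * where "q" and "dev" stand for the driver's own vb2_queue and struct
 * device; the names are illustrative only.
 */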

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);