media: videobuf2-dma-contig: fix bad kfree in vb2_dma_contig_clear_max_seg_size
linux-2.6-microblaze.git: drivers/media/common/videobuf2/videobuf2-dma-contig.c
/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
        struct device                   *dev;
        void                            *vaddr;
        unsigned long                   size;
        void                            *cookie;
        dma_addr_t                      dma_addr;
        unsigned long                   attrs;
        enum dma_data_direction         dma_dir;
        struct sg_table                 *dma_sgt;
        struct frame_vector             *vec;

        /* MMAP related */
        struct vb2_vmarea_handler       handler;
        refcount_t                      refcount;
        struct sg_table                 *sgt_base;

        /* DMABUF related */
        struct dma_buf_attachment       *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

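/*
 * Return the size of the initial DMA-contiguous run of a mapped sg table,
 * i.e. how many bytes the device sees as one chunk starting at the first
 * DMA address.
 */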
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}

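/*
 * Buffers imported through DMABUF have no kernel mapping by default;
 * create one lazily on the first vaddr request.
 */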
static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!buf->vaddr && buf->db_attach)
                buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
                               buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!refcount_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
                       buf->attrs);
        put_device(buf->dev);
        kfree(buf);
}

static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
                          unsigned long size, enum dma_data_direction dma_dir,
                          gfp_t gfp_flags)
{
        struct vb2_dc_buf *buf;

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        if (attrs)
                buf->attrs = attrs;
        buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
                                        GFP_KERNEL | gfp_flags, buf->attrs);
        if (!buf->cookie) {
                dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
                buf->vaddr = buf->cookie;

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;
        buf->dma_dir = dma_dir;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        refcount_set(&buf->refcount, 1);

        return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }

        ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
                buf->dma_addr, buf->size, buf->attrs);

        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags           |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
                __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                buf->size);

        return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /*
         * Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                /*
                 * Cache sync can be skipped here, as the vb2_dc memory is
                 * allocated from device coherent memory, which means the
                 * memory locations do not require any explicit cache
                 * maintenance before or after being used by the device.
                 */
                dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                   attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                   attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
                attach->dma_dir = DMA_NONE;
        }

        /*
         * Map to the client with the new direction; no cache sync is
         * required, see the comment in vb2_dc_dmabuf_ops_detach().
         */
        sgt->nents = dma_map_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                      dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach = vb2_dc_dmabuf_ops_attach,
        .detach = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
        .vmap = vb2_dc_dmabuf_ops_vmap,
        .mmap = vb2_dc_dmabuf_ops_mmap,
        .release = vb2_dc_dmabuf_ops_release,
};

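/*
 * Build an sg_table describing the underlying coherent allocation; it is
 * used as the template that each DMABUF attachment copies and maps.
 */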
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
                buf->size, buf->attrs);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_dc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        refcount_inc(&buf->refcount);

        return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;
        int i;
        struct page **pages;

        if (sgt) {
                /*
                 * No need to sync to CPU, it's already synced to the CPU
                 * since the finish() memop will have been called before this.
                 */
                dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
                pages = frame_vector_pages(buf->vec);
                /* sgt should exist only if vector contains pages... */
                BUG_ON(IS_ERR(pages));
                if (buf->dma_dir == DMA_FROM_DEVICE ||
                    buf->dma_dir == DMA_BIDIRECTIONAL)
                        for (i = 0; i < frame_vector_count(buf->vec); i++)
                                set_page_dirty_lock(pages[i]);
                sg_free_table(sgt);
                kfree(sgt);
        } else {
                dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
                                   buf->dma_dir, 0);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_buf *buf;
        struct frame_vector *vec;
        unsigned int offset;
        int n_pages, i;
        int ret = 0;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();

        /* Only cache-aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = dev;
        buf->dma_dir = dma_dir;

        offset = lower_32_bits(offset_in_page(vaddr));
        vec = vb2_create_framevec(vaddr, size);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_buf;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        ret = frame_vector_to_pages(vec);
        if (ret < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * Failed to convert to pages... Check the memory is physically
                 * contiguous and use direct mapping
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i-1] + 1 != nums[i])
                                goto fail_pfnvec;
                buf->dma_addr = dma_map_resource(buf->dev,
                                __pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
                if (dma_mapping_error(buf->dev, buf->dma_addr)) {
                        ret = -ENOMEM;
                        goto fail_pfnvec;
                }
                goto out;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_pfnvec;
        }

        ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /*
         * No need to sync to the device, this will happen later when the
         * prepare() memop is called.
         */
        sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                                      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                        contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
out:
        buf->size = size;

        return buf;

fail_map_sg:
        dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
                           buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_pfnvec:
        vb2_destroy_framevec(vec);

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non-attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* check if the dmabuf is big enough to store a contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu\n",
                       contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;
        buf->vaddr = NULL;

        return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a non-attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        if (buf->vaddr) {
                dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
                buf->vaddr = NULL;
        }
        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach a mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        if (WARN_ON(!dev))
                return ERR_PTR(-EINVAL);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = dma_dir;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc          = vb2_dc_alloc,
        .put            = vb2_dc_put,
        .get_dmabuf     = vb2_dc_get_dmabuf,
        .cookie         = vb2_dc_cookie,
        .vaddr          = vb2_dc_vaddr,
        .mmap           = vb2_dc_mmap,
        .get_userptr    = vb2_dc_get_userptr,
        .put_userptr    = vb2_dc_put_userptr,
        .prepare        = vb2_dc_prepare,
        .finish         = vb2_dc_finish,
        .map_dmabuf     = vb2_dc_map_dmabuf,
        .unmap_dmabuf   = vb2_dc_unmap_dmabuf,
        .attach_dmabuf  = vb2_dc_attach_dmabuf,
        .detach_dmabuf  = vb2_dc_detach_dmabuf,
        .num_users      = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:        device for configuring DMA parameters
 * @size:       DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from drivers that are known to operate
 * on platforms with an IOMMU and provide access to shared buffers (either
 * USERPTR or DMABUF). This should be done before initializing the
 * videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
        if (!dev->dma_parms) {
                dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
                return -ENODEV;
        }
        if (dma_get_max_seg_size(dev) < size)
                return dma_set_max_seg_size(dev, size);

        return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
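
/*
 * Example usage (an illustrative sketch, not part of this file): a driver
 * that shares USERPTR or DMABUF buffers through an IOMMU would typically
 * raise the max segment size from its probe() before setting up the vb2
 * queue, so the whole buffer can be mapped as one DMA segment. The
 * foo_probe() function below is hypothetical:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = vb2_dma_contig_set_max_seg_size(&pdev->dev,
 *						      DMA_BIT_MASK(32));
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */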

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");