// SPDX-License-Identifier: GPL-2.0

/*
 * Xen dma-buf functionality for gntdev.
 *
 * DMA buffer implementation is based on drivers/gpu/drm/drm_prime.c.
 *
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/module.h>

#include <xen/xen.h>
#include <xen/grant_table.h>

#include "gntdev-common.h"
#include "gntdev-dmabuf.h"

MODULE_IMPORT_NS(DMA_BUF);

#ifndef GRANT_INVALID_REF
/*
 * Note on usage of grant reference 0 as invalid grant reference:
 * grant reference 0 is valid, but never exposed to a driver,
 * because of the fact it is already in use/reserved by the PV console.
 */
#define GRANT_INVALID_REF	0
#endif
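
/*
 * Per-buffer bookkeeping shared by both directions of transfer: a buffer
 * is either exported from grant references (u.exp) or imported from a
 * foreign dma-buf (u.imp), never both, hence the union below.
 */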
struct gntdev_dmabuf {
	struct gntdev_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	struct list_head next;
	int fd;

	union {
		struct {
			/* Exported buffers are reference counted. */
			struct kref refcount;

			struct gntdev_priv *priv;
			struct gntdev_grant_map *map;
		} exp;
		struct {
			/* Granted references of the imported buffer. */
			grant_ref_t *refs;
			/* Scatter-gather table of the imported buffer. */
			struct sg_table *sgt;
			/* dma-buf attachment of the imported buffer. */
			struct dma_buf_attachment *attach;
		} imp;
	} u;

	/* Number of pages this buffer has. */
	int nr_pages;
	/* Pages of this buffer. */
	struct page **pages;
};
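
/*
 * A wait object is what dmabuf_exp_wait_released() blocks on: its
 * completion is fired from dmabuf_exp_release() once the exported
 * buffer is finally dropped by all of its users.
 */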
struct gntdev_dmabuf_wait_obj {
	struct list_head next;
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct completion completion;
};
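
/*
 * Per-attachment state: the scatter-gather table mapped for the attached
 * device and the direction it was mapped with (DMA_NONE while unmapped).
 */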
struct gntdev_dmabuf_attachment {
	struct sg_table *sgt;
	enum dma_data_direction dir;
};

struct gntdev_dmabuf_priv {
	/* List of exported DMA buffers. */
	struct list_head exp_list;
	/* List of wait objects. */
	struct list_head exp_wait_list;
	/* List of imported DMA buffers. */
	struct list_head imp_list;
	/* This is the lock which protects dma_buf_xxx lists. */
	struct mutex lock;
	/*
	 * We reference this file while exporting dma-bufs, so
	 * the grant device context is not destroyed while there are
	 * external users alive.
	 */
	struct file *filp;
};

/* DMA buffer export support. */

/* Implementation of wait for exported DMA buffer to be released. */

static void dmabuf_exp_release(struct kref *kref);
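
/*
 * Note on the lifetime protocol: the wait object immediately drops the
 * reference taken by dmabuf_exp_wait_obj_get_dmabuf() via kref_put(), so
 * the buffer can still be released while we wait; the release path then
 * signals the completion under priv->lock.
 */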
static struct gntdev_dmabuf_wait_obj *
dmabuf_exp_wait_obj_new(struct gntdev_dmabuf_priv *priv,
			struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	init_completion(&obj->completion);
	obj->gntdev_dmabuf = gntdev_dmabuf;

	mutex_lock(&priv->lock);
	list_add(&obj->next, &priv->exp_wait_list);
	/* Put our reference and wait for gntdev_dmabuf's release to fire. */
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
	return obj;
}

static void dmabuf_exp_wait_obj_free(struct gntdev_dmabuf_priv *priv,
				     struct gntdev_dmabuf_wait_obj *obj)
{
	mutex_lock(&priv->lock);
	list_del(&obj->next);
	mutex_unlock(&priv->lock);
	kfree(obj);
}

static int dmabuf_exp_wait_obj_wait(struct gntdev_dmabuf_wait_obj *obj,
				    u32 wait_to_ms)
{
	if (wait_for_completion_timeout(&obj->completion,
			msecs_to_jiffies(wait_to_ms)) <= 0)
		return -ETIMEDOUT;

	return 0;
}

static void dmabuf_exp_wait_obj_signal(struct gntdev_dmabuf_priv *priv,
				       struct gntdev_dmabuf *gntdev_dmabuf)
{
	struct gntdev_dmabuf_wait_obj *obj;

	list_for_each_entry(obj, &priv->exp_wait_list, next)
		if (obj->gntdev_dmabuf == gntdev_dmabuf) {
			pr_debug("Found gntdev_dmabuf in the wait list, wake\n");
			complete_all(&obj->completion);
			break;
		}
}

static struct gntdev_dmabuf *
dmabuf_exp_wait_obj_get_dmabuf(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry(gntdev_dmabuf, &priv->exp_list, next)
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the wait list\n");
			kref_get(&gntdev_dmabuf->u.exp.refcount);
			ret = gntdev_dmabuf;
			break;
		}
	mutex_unlock(&priv->lock);
	return ret;
}
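
/*
 * Block until the exported buffer with this fd is released by all of its
 * users or the timeout expires: returns -ENOENT if the buffer is already
 * gone (or the fd is wrong) and -ETIMEDOUT if it is still in use after
 * wait_to_ms milliseconds.
 */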
static int dmabuf_exp_wait_released(struct gntdev_dmabuf_priv *priv, int fd,
				    int wait_to_ms)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct gntdev_dmabuf_wait_obj *obj;
	int ret;

	pr_debug("Will wait for dma-buf with fd %d\n", fd);
	/*
	 * Try to find the DMA buffer: if not found means that
	 * either the buffer has already been released or file descriptor
	 * provided is wrong.
	 */
	gntdev_dmabuf = dmabuf_exp_wait_obj_get_dmabuf(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	/*
	 * gntdev_dmabuf still exists and is reference count locked by us now,
	 * so prepare to wait: allocate wait object and add it to the wait list,
	 * so we can find it on release.
	 */
	obj = dmabuf_exp_wait_obj_new(priv, gntdev_dmabuf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = dmabuf_exp_wait_obj_wait(obj, wait_to_ms);
	dmabuf_exp_wait_obj_free(priv, obj);
	return ret;
}

/* DMA buffer export support. */

static struct sg_table *
dmabuf_pages_to_sgt(struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sgt;
	int ret;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, nr_pages, 0,
					nr_pages << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret)
		goto out;

	return sgt;

out:
	kfree(sgt);
	return ERR_PTR(ret);
}

static int dmabuf_exp_ops_attach(struct dma_buf *dma_buf,
				 struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach;

	gntdev_dmabuf_attach = kzalloc(sizeof(*gntdev_dmabuf_attach),
				       GFP_KERNEL);
	if (!gntdev_dmabuf_attach)
		return -ENOMEM;

	gntdev_dmabuf_attach->dir = DMA_NONE;
	attach->priv = gntdev_dmabuf_attach;
	return 0;
}

static void dmabuf_exp_ops_detach(struct dma_buf *dma_buf,
				  struct dma_buf_attachment *attach)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;

	if (gntdev_dmabuf_attach) {
		struct sg_table *sgt = gntdev_dmabuf_attach->sgt;

		if (sgt) {
			if (gntdev_dmabuf_attach->dir != DMA_NONE)
				dma_unmap_sgtable(attach->dev, sgt,
						  gntdev_dmabuf_attach->dir,
						  DMA_ATTR_SKIP_CPU_SYNC);
			sg_free_table(sgt);
		}

		kfree(sgt);
		kfree(gntdev_dmabuf_attach);
		attach->priv = NULL;
	}
}

static struct sg_table *
dmabuf_exp_ops_map_dma_buf(struct dma_buf_attachment *attach,
			   enum dma_data_direction dir)
{
	struct gntdev_dmabuf_attachment *gntdev_dmabuf_attach = attach->priv;
	struct gntdev_dmabuf *gntdev_dmabuf = attach->dmabuf->priv;
	struct sg_table *sgt;

	pr_debug("Mapping %d pages for dev %p\n", gntdev_dmabuf->nr_pages,
		 attach->dev);

	if (dir == DMA_NONE || !gntdev_dmabuf_attach)
		return ERR_PTR(-EINVAL);

	/* Return the cached mapping when possible. */
	if (gntdev_dmabuf_attach->dir == dir)
		return gntdev_dmabuf_attach->sgt;

	/*
	 * Two mappings with different directions for the same attachment are
	 * not allowed.
	 */
	if (gntdev_dmabuf_attach->dir != DMA_NONE)
		return ERR_PTR(-EBUSY);

	sgt = dmabuf_pages_to_sgt(gntdev_dmabuf->pages,
				  gntdev_dmabuf->nr_pages);
	if (!IS_ERR(sgt)) {
		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC)) {
			sg_free_table(sgt);
			kfree(sgt);
			sgt = ERR_PTR(-ENOMEM);
		} else {
			gntdev_dmabuf_attach->sgt = sgt;
			gntdev_dmabuf_attach->dir = dir;
		}
	}
	if (IS_ERR(sgt))
		pr_debug("Failed to map sg table for dev %p\n", attach->dev);
	return sgt;
}

static void dmabuf_exp_ops_unmap_dma_buf(struct dma_buf_attachment *attach,
					 struct sg_table *sgt,
					 enum dma_data_direction dir)
{
	/* Not implemented. The unmap is done at dmabuf_exp_ops_detach(). */
}

static void dmabuf_exp_release(struct kref *kref)
{
	struct gntdev_dmabuf *gntdev_dmabuf =
		container_of(kref, struct gntdev_dmabuf, u.exp.refcount);

	dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
	list_del(&gntdev_dmabuf->next);
	fput(gntdev_dmabuf->priv->filp);
	kfree(gntdev_dmabuf);
}

static void dmabuf_exp_remove_map(struct gntdev_priv *priv,
				  struct gntdev_grant_map *map)
{
	mutex_lock(&priv->lock);
	list_del(&map->next);
	gntdev_put_map(NULL /* already removed */, map);
	mutex_unlock(&priv->lock);
}

static void dmabuf_exp_ops_release(struct dma_buf *dma_buf)
{
	struct gntdev_dmabuf *gntdev_dmabuf = dma_buf->priv;
	struct gntdev_dmabuf_priv *priv = gntdev_dmabuf->priv;

	dmabuf_exp_remove_map(gntdev_dmabuf->u.exp.priv,
			      gntdev_dmabuf->u.exp.map);
	mutex_lock(&priv->lock);
	kref_put(&gntdev_dmabuf->u.exp.refcount, dmabuf_exp_release);
	mutex_unlock(&priv->lock);
}

static const struct dma_buf_ops dmabuf_exp_ops = {
	.attach = dmabuf_exp_ops_attach,
	.detach = dmabuf_exp_ops_detach,
	.map_dma_buf = dmabuf_exp_ops_map_dma_buf,
	.unmap_dma_buf = dmabuf_exp_ops_unmap_dma_buf,
	.release = dmabuf_exp_ops_release,
};
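
/*
 * Everything dmabuf_exp_from_pages() needs to build and publish the
 * dma-buf: the owning contexts, the backing grant map and its pages,
 * plus the resulting file descriptor handed back to the caller.
 */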
struct gntdev_dmabuf_export_args {
	struct gntdev_priv *priv;
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_priv *dmabuf_priv;
	struct device *dev;
	int count;
	struct page **pages;
	u32 fd;
};

static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct gntdev_dmabuf *gntdev_dmabuf;
	int ret;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		return -ENOMEM;

	kref_init(&gntdev_dmabuf->u.exp.refcount);

	gntdev_dmabuf->priv = args->dmabuf_priv;
	gntdev_dmabuf->nr_pages = args->count;
	gntdev_dmabuf->pages = args->pages;
	gntdev_dmabuf->u.exp.priv = args->priv;
	gntdev_dmabuf->u.exp.map = args->map;

	exp_info.exp_name = KBUILD_MODNAME;
	if (args->dev->driver && args->dev->driver->owner)
		exp_info.owner = args->dev->driver->owner;
	else
		exp_info.owner = THIS_MODULE;
	exp_info.ops = &dmabuf_exp_ops;
	exp_info.size = args->count << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = gntdev_dmabuf;

	gntdev_dmabuf->dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(gntdev_dmabuf->dmabuf)) {
		ret = PTR_ERR(gntdev_dmabuf->dmabuf);
		gntdev_dmabuf->dmabuf = NULL;
		goto fail;
	}

	ret = dma_buf_fd(gntdev_dmabuf->dmabuf, O_CLOEXEC);
	if (ret < 0)
		goto fail;

	gntdev_dmabuf->fd = ret;
	args->fd = ret;

	pr_debug("Exporting DMA buffer with fd %d\n", ret);

	mutex_lock(&args->dmabuf_priv->lock);
	list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
	mutex_unlock(&args->dmabuf_priv->lock);
	get_file(gntdev_dmabuf->priv->filp);
	return 0;

fail:
	if (gntdev_dmabuf->dmabuf)
		dma_buf_put(gntdev_dmabuf->dmabuf);
	kfree(gntdev_dmabuf);
	return ret;
}

static struct gntdev_grant_map *
dmabuf_exp_alloc_backing_storage(struct gntdev_priv *priv, int dmabuf_flags,
				 int count)
{
	struct gntdev_grant_map *map;

	if (unlikely(gntdev_test_page_count(count)))
		return ERR_PTR(-EINVAL);

	if ((dmabuf_flags & GNTDEV_DMA_FLAG_WC) &&
	    (dmabuf_flags & GNTDEV_DMA_FLAG_COHERENT)) {
		pr_debug("Wrong dma-buf flags: 0x%x\n", dmabuf_flags);
		return ERR_PTR(-EINVAL);
	}

	map = gntdev_alloc_map(priv, count, dmabuf_flags);
	if (!map)
		return ERR_PTR(-ENOMEM);

	return map;
}
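
/*
 * Export path in one place: allocate backing storage, fill in the grant
 * references, map them into the backing pages and then wrap those pages
 * into a dma-buf via dmabuf_exp_from_pages(). On any failure the map is
 * torn down again through dmabuf_exp_remove_map().
 */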
static int dmabuf_exp_from_refs(struct gntdev_priv *priv, int flags,
				int count, u32 domid, u32 *refs, u32 *fd)
{
	struct gntdev_grant_map *map;
	struct gntdev_dmabuf_export_args args;
	int i, ret;

	map = dmabuf_exp_alloc_backing_storage(priv, flags, count);
	if (IS_ERR(map))
		return PTR_ERR(map);

	for (i = 0; i < count; i++) {
		map->grants[i].domid = domid;
		map->grants[i].ref = refs[i];
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	mutex_unlock(&priv->lock);

	map->flags |= GNTMAP_host_map;
#if defined(CONFIG_X86)
	map->flags |= GNTMAP_device_map;
#endif

	ret = gntdev_map_grant_pages(map);
	if (ret < 0)
		goto out;

	args.priv = priv;
	args.map = map;
	args.dev = priv->dma_dev;
	args.dmabuf_priv = priv->dmabuf_priv;
	args.count = map->count;
	args.pages = map->pages;
	args.fd = -1; /* Shut up unnecessary gcc warning for i386 */

	ret = dmabuf_exp_from_pages(&args);
	if (ret < 0)
		goto out;

	*fd = args.fd;
	return 0;

out:
	dmabuf_exp_remove_map(priv, map);
	return ret;
}

/* DMA buffer import support. */
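
/*
 * The import path is the mirror image of export: attach to and map a
 * foreign dma-buf, convert its sg table into an array of pages and grant
 * the remote domain access to each of them, handing the resulting grant
 * references back to user space.
 */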
static int
dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
				int count, int domid)
{
	grant_ref_t priv_gref_head;
	int i, ret;

	ret = gnttab_alloc_grant_references(count, &priv_gref_head);
	if (ret < 0) {
		pr_debug("Cannot allocate grant references, ret %d\n", ret);
		return ret;
	}

	for (i = 0; i < count; i++) {
		int cur_ref;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0) {
			ret = cur_ref;
			pr_debug("Cannot claim grant reference, ret %d\n", ret);
			goto out;
		}

		gnttab_grant_foreign_access_ref(cur_ref, domid,
						xen_page_to_gfn(pages[i]), 0);
		refs[i] = cur_ref;
	}

	return 0;

out:
	gnttab_free_grant_references(priv_gref_head);
	return ret;
}

static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (refs[i] != GRANT_INVALID_REF)
			gnttab_end_foreign_access(refs[i], 0, 0UL);
}

static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
	kfree(gntdev_dmabuf->pages);
	kfree(gntdev_dmabuf->u.imp.refs);
	kfree(gntdev_dmabuf);
}

static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	int i;

	gntdev_dmabuf = kzalloc(sizeof(*gntdev_dmabuf), GFP_KERNEL);
	if (!gntdev_dmabuf)
		goto fail_no_free;

	gntdev_dmabuf->u.imp.refs = kcalloc(count,
					    sizeof(gntdev_dmabuf->u.imp.refs[0]),
					    GFP_KERNEL);
	if (!gntdev_dmabuf->u.imp.refs)
		goto fail;

	gntdev_dmabuf->pages = kcalloc(count,
				       sizeof(gntdev_dmabuf->pages[0]),
				       GFP_KERNEL);
	if (!gntdev_dmabuf->pages)
		goto fail;

	gntdev_dmabuf->nr_pages = count;

	for (i = 0; i < count; i++)
		gntdev_dmabuf->u.imp.refs[i] = GRANT_INVALID_REF;

	return gntdev_dmabuf;

fail:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_no_free:
	return ERR_PTR(-ENOMEM);
}

static struct gntdev_dmabuf *
dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
		   int fd, int count, int domid)
{
	struct gntdev_dmabuf *gntdev_dmabuf, *ret;
	struct dma_buf *dma_buf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct sg_page_iter sg_iter;
	int i;

	dma_buf = dma_buf_get(fd);
	if (IS_ERR(dma_buf))
		return ERR_CAST(dma_buf);

	gntdev_dmabuf = dmabuf_imp_alloc_storage(count);
	if (IS_ERR(gntdev_dmabuf)) {
		ret = gntdev_dmabuf;
		goto fail_put;
	}

	gntdev_dmabuf->priv = priv;
	gntdev_dmabuf->fd = fd;

	attach = dma_buf_attach(dma_buf, dev);
	if (IS_ERR(attach)) {
		ret = ERR_CAST(attach);
		goto fail_free_obj;
	}

	gntdev_dmabuf->u.imp.attach = attach;

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = ERR_CAST(sgt);
		goto fail_detach;
	}

	/* Check that we have zero offset. */
	if (sgt->sgl->offset) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n",
			 sgt->sgl->offset);
		goto fail_unmap;
	}

	/* Check number of pages that imported buffer has. */
	if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) {
		ret = ERR_PTR(-EINVAL);
		pr_debug("DMA buffer has %zu pages, user-space expects %d\n",
			 attach->dmabuf->size, gntdev_dmabuf->nr_pages);
		goto fail_unmap;
	}

	gntdev_dmabuf->u.imp.sgt = sgt;

	/* Now convert sgt to array of pages and check for page validity. */
	i = 0;
	for_each_sgtable_page(sgt, &sg_iter, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		/*
		 * Check if page is valid: this can happen if we are given
		 * a page from VRAM or other resources which are not backed
		 * by a struct page.
		 */
		if (!pfn_valid(page_to_pfn(page))) {
			ret = ERR_PTR(-EINVAL);
			goto fail_unmap;
		}

		gntdev_dmabuf->pages[i++] = page;
	}

	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
						      gntdev_dmabuf->u.imp.refs,
						      count, domid));
	if (IS_ERR(ret))
		goto fail_end_access;

	pr_debug("Imported DMA buffer with fd %d\n", fd);

	mutex_lock(&priv->lock);
	list_add(&gntdev_dmabuf->next, &priv->imp_list);
	mutex_unlock(&priv->lock);

	return gntdev_dmabuf;

fail_end_access:
	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
fail_free_obj:
	dmabuf_imp_free_storage(gntdev_dmabuf);
fail_put:
	dma_buf_put(dma_buf);
	return ret;
}

/*
 * Find the hyper dma-buf by its file descriptor and remove
 * it from the buffer's list.
 */
static struct gntdev_dmabuf *
dmabuf_imp_find_unlink(struct gntdev_dmabuf_priv *priv, int fd)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);

	mutex_lock(&priv->lock);
	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
		if (gntdev_dmabuf->fd == fd) {
			pr_debug("Found gntdev_dmabuf in the import list\n");
			ret = gntdev_dmabuf;
			list_del(&gntdev_dmabuf->next);
			break;
		}
	}
	mutex_unlock(&priv->lock);
	return ret;
}

static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
{
	struct gntdev_dmabuf *gntdev_dmabuf;
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;

	gntdev_dmabuf = dmabuf_imp_find_unlink(priv, fd);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	pr_debug("Releasing DMA buffer with fd %d\n", fd);

	dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs,
				      gntdev_dmabuf->nr_pages);

	attach = gntdev_dmabuf->u.imp.attach;

	if (gntdev_dmabuf->u.imp.sgt)
		dma_buf_unmap_attachment(attach, gntdev_dmabuf->u.imp.sgt,
					 DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dma_buf);

	dmabuf_imp_free_storage(gntdev_dmabuf);
	return 0;
}

static void dmabuf_imp_release_all(struct gntdev_dmabuf_priv *priv)
{
	struct gntdev_dmabuf *q, *gntdev_dmabuf;

	list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
		dmabuf_imp_release(priv, gntdev_dmabuf->fd);
}

/* DMA buffer IOCTL support. */
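
/*
 * Illustrative user-space sketch of driving the export ioctl below (not
 * part of this driver; it assumes the uapi layout from xen/gntdev.h with
 * the trailing variable-size refs[] array, and omits all error handling):
 *
 *	struct ioctl_gntdev_dmabuf_exp_from_refs *op;
 *	int gntdev_fd = open("/dev/xen/gntdev", O_RDWR);
 *
 *	op = calloc(1, sizeof(*op) + count * sizeof(op->refs[0]));
 *	op->flags = GNTDEV_DMA_FLAG_WC;
 *	op->count = count;
 *	op->domid = remote_domid;
 *	memcpy(op->refs, refs, count * sizeof(op->refs[0]));
 *	ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op);
 *
 * On success, op->fd holds the file descriptor of the exported dma-buf.
 */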

long gntdev_ioctl_dmabuf_exp_from_refs(struct gntdev_priv *priv, int use_ptemod,
				       struct ioctl_gntdev_dmabuf_exp_from_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs op;
	u32 *refs;
	long ret;

	if (use_ptemod) {
		pr_debug("Cannot provide dma-buf: use_ptemod %d\n",
			 use_ptemod);
		return -EINVAL;
	}

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	refs = kcalloc(op.count, sizeof(*refs), GFP_KERNEL);
	if (!refs)
		return -ENOMEM;

	if (copy_from_user(refs, u->refs, sizeof(*refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out;
	}

	ret = dmabuf_exp_from_refs(priv, op.flags, op.count,
				   op.domid, refs, &op.fd);
	if (ret)
		goto out;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		ret = -EFAULT;

out:
	kfree(refs);
	return ret;
}

long gntdev_ioctl_dmabuf_exp_wait_released(struct gntdev_priv *priv,
					   struct ioctl_gntdev_dmabuf_exp_wait_released __user *u)
{
	struct ioctl_gntdev_dmabuf_exp_wait_released op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_exp_wait_released(priv->dmabuf_priv, op.fd,
					op.wait_to_ms);
}

long gntdev_ioctl_dmabuf_imp_to_refs(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_to_refs __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_to_refs op;
	struct gntdev_dmabuf *gntdev_dmabuf;
	long ret;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	gntdev_dmabuf = dmabuf_imp_to_refs(priv->dmabuf_priv,
					   priv->dma_dev, op.fd,
					   op.count, op.domid);
	if (IS_ERR(gntdev_dmabuf))
		return PTR_ERR(gntdev_dmabuf);

	if (copy_to_user(u->refs, gntdev_dmabuf->u.imp.refs,
			 sizeof(*u->refs) * op.count) != 0) {
		ret = -EFAULT;
		goto out_release;
	}
	return 0;

out_release:
	dmabuf_imp_release(priv->dmabuf_priv, op.fd);
	return ret;
}

long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
				     struct ioctl_gntdev_dmabuf_imp_release __user *u)
{
	struct ioctl_gntdev_dmabuf_imp_release op;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;

	return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
}

struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
{
	struct gntdev_dmabuf_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	mutex_init(&priv->lock);
	INIT_LIST_HEAD(&priv->exp_list);
	INIT_LIST_HEAD(&priv->exp_wait_list);
	INIT_LIST_HEAD(&priv->imp_list);

	priv->filp = filp;

	return priv;
}

void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv)
{
	dmabuf_imp_release_all(priv);
	kfree(priv);
}