dma-buf: Use struct dma_buf_map in dma_buf_vunmap() interfaces
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

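/* The exporter stashes the GEM object in dma_buf->priv; convert it back. */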
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
        return to_intel_bo(buf->priv);
}

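/*
 * Build an independent copy of the object's sg_table and DMA-map it for
 * the importing device. The object's pages stay pinned until the
 * attachment is unmapped again.
 */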
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int ret, i;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err;

        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (st == NULL) {
                ret = -ENOMEM;
                goto err_unpin_pages;
        }

        ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
        if (ret)
                goto err_free;

        src = obj->mm.pages->sgl;
        dst = st->sgl;
        for (i = 0; i < obj->mm.pages->nents; i++) {
                sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
        }

        if (!dma_map_sg_attrs(attachment->dev,
                              st->sgl, st->nents, dir,
                              DMA_ATTR_SKIP_CPU_SYNC)) {
                ret = -ENOMEM;
                goto err_free_sg;
        }

        return st;

err_free_sg:
        sg_free_table(st);
err_free:
        kfree(st);
err_unpin_pages:
        i915_gem_object_unpin_pages(obj);
err:
        return ERR_PTR(ret);
}

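/* Reverse of i915_gem_map_dma_buf(): unmap, free the copied sg_table, unpin. */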
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

        dma_unmap_sg_attrs(attachment->dev,
                           sg->sgl, sg->nents, dir,
                           DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sg);
        kfree(sg);

        i915_gem_object_unpin_pages(obj);
}

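/*
 * Pin the object's pages into a contiguous write-back kernel mapping and
 * publish the address through the caller-provided struct dma_buf_map.
 */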
static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        void *vaddr;

        vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        dma_buf_map_set_vaddr(map, vaddr);

        return 0;
}

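/*
 * The kernel mapping is tracked by the object itself, so the map argument
 * is not needed here; flush any CPU writes and drop the vmap pin.
 */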
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);
}

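/*
 * Forward mmap of the dma-buf to the shmem file backing the object,
 * repointing the vma at that file so follow-up vm operations hit it.
 */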
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        int ret;

        if (obj->base.size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!obj->base.filp)
                return -ENODEV;

        ret = call_mmap(obj->base.filp, vma);
        if (ret)
                return ret;

        fput(vma->vm_file);
        vma->vm_file = get_file(obj->base.filp);

        return 0;
}

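/* Move the object to the CPU domain before the importer's CPU access. */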
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
        int err;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                return err;

        err = i915_gem_object_lock_interruptible(obj, NULL);
        if (err)
                goto out;

        err = i915_gem_object_set_to_cpu_domain(obj, write);
        i915_gem_object_unlock(obj);

out:
        i915_gem_object_unpin_pages(obj);
        return err;
}

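/* Return the object to the coherent GTT domain once CPU access ends. */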
static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        int err;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                return err;

        err = i915_gem_object_lock_interruptible(obj, NULL);
        if (err)
                goto out;

        err = i915_gem_object_set_to_gtt_domain(obj, false);
        i915_gem_object_unlock(obj);

out:
        i915_gem_object_unpin_pages(obj);
        return err;
}

static const struct dma_buf_ops i915_dmabuf_ops = {
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .mmap = i915_gem_dmabuf_mmap,
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
        .begin_cpu_access = i915_gem_begin_cpu_access,
        .end_cpu_access = i915_gem_end_cpu_access,
};

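/*
 * Export one of our GEM objects as a dma-buf, sharing the object's
 * reservation object so fences are visible on both sides.
 */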
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &i915_dmabuf_ops;
        exp_info.size = gem_obj->size;
        exp_info.flags = flags;
        exp_info.priv = gem_obj;
        exp_info.resv = obj->base.resv;

        if (obj->ops->dmabuf_export) {
                int ret = obj->ops->dmabuf_export(obj);

                if (ret)
                        return ERR_PTR(ret);
        }

        return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

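/* Imported objects have no backing store of their own; map the exporter's. */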
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;
        unsigned int sg_page_sizes;

        pages = dma_buf_map_attachment(obj->base.import_attach,
                                       DMA_BIDIRECTIONAL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        sg_page_sizes = i915_sg_page_sizes(pages->sgl);

        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);

        return 0;
}

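/* Release the exporter's mapping obtained in the get_pages hook above. */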
static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
                                             struct sg_table *pages)
{
        dma_buf_unmap_attachment(obj->base.import_attach, pages,
                                 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
        .name = "i915_gem_object_dmabuf",
        .get_pages = i915_gem_object_get_pages_dmabuf,
        .put_pages = i915_gem_object_put_pages_dmabuf,
};

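/*
 * Import a dma-buf: self-imports take a reference on the underlying GEM
 * object directly; foreign buffers are wrapped in a new GEM object that
 * sources its pages from the attachment.
 */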
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
{
        static struct lock_class_key lock_class;
        struct dma_buf_attachment *attach;
        struct drm_i915_gem_object *obj;
        int ret;

        /* is this one of our own objects? */
        if (dma_buf->ops == &i915_dmabuf_ops) {
                obj = dma_buf_to_obj(dma_buf);
                /* is it from our device? */
                if (obj->base.dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own GEM object
                         * increases the refcount on the GEM object itself
                         * instead of the f_count of the dmabuf.
                         */
                        return &i915_gem_object_get(obj)->base;
                }
        }

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        obj = i915_gem_object_alloc();
        if (obj == NULL) {
                ret = -ENOMEM;
                goto fail_detach;
        }

        drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class);
        obj->base.import_attach = attach;
        obj->base.resv = dma_buf->resv;

        /*
         * We use GTT as shorthand for a coherent domain, one that is
         * neither in the GPU cache nor in the CPU cache, where all
         * writes are immediately visible in memory. (That's not strictly
         * true, but it's close! There are internal buffers such as the
         * write-combined buffer or a delay through the chipset for GTT
         * writes that do require us to treat GTT as a separate cache domain.)
         */
        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;

        return &obj->base;

fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif