/*
 * Copyright 2012 Red Hat Inc
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Dave Airlie <airlied@redhat.com>
 */

#include <linux/dma-buf.h>
#include <linux/reservation.h>

#include "i915_drv.h"

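/* Unwrap an i915-exported dma-buf back to its GEM object (stored in priv). */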
static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
        return to_intel_bo(buf->priv);
}

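/*
 * Build the importer's view of our backing store: pin the object's pages,
 * duplicate the scatterlist so each attachment owns an independent copy,
 * and DMA-map that copy for the importer's device.
 */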
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int ret, i;

        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err;

        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st) {
                ret = -ENOMEM;
                goto err_unpin_pages;
        }

        ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
        if (ret)
                goto err_free;

        src = obj->mm.pages->sgl;
        dst = st->sgl;
        for (i = 0; i < obj->mm.pages->nents; i++) {
                sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
        }

        if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
                ret = -ENOMEM;
                goto err_free_sg;
        }

        return st;

err_free_sg:
        sg_free_table(st);
err_free:
        kfree(st);
err_unpin_pages:
        i915_gem_object_unpin_pages(obj);
err:
        return ERR_PTR(ret);
}

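/* Undo i915_gem_map_dma_buf(): unmap the DMA addresses, free our
 * scatterlist copy and drop the page pin taken when mapping.
 */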
static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);

        dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
        sg_free_table(sg);
        kfree(sg);

        i915_gem_object_unpin_pages(obj);
}

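/* Whole-object kernel mappings: vmap pins the pages and returns a
 * write-back (I915_MAP_WB) CPU mapping, vunmap flushes and unpins it.
 */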
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        return i915_gem_object_pin_map(obj, I915_MAP_WB);
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);
}

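/* Per-page CPU access for importers. Only objects actually backed by
 * struct pages can be kmapped; any failure is reported as NULL, as the
 * dma-buf .map contract expects.
 */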
static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct page *page;

        if (page_num >= obj->base.size >> PAGE_SHIFT)
                return NULL;

        if (!i915_gem_object_has_struct_page(obj))
                return NULL;

        if (i915_gem_object_pin_pages(obj))
                return NULL;

        /* Synchronisation is left to the caller (via .begin_cpu_access()) */
        page = i915_gem_object_get_page(obj, page_num);
        if (IS_ERR(page))
                goto err_unpin;

        return kmap(page);

err_unpin:
        i915_gem_object_unpin_pages(obj);
        return NULL;
}

static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        kunmap(virt_to_page(addr));
        i915_gem_object_unpin_pages(obj);
}

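/* Userspace mmap of the dma-buf is forwarded to the object's shmem
 * file; vm_file is then swapped so the VMA holds a reference on our
 * backing file rather than on the dma-buf file.
 */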
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        int ret;

        if (obj->base.size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!obj->base.filp)
                return -ENODEV;

        ret = call_mmap(obj->base.filp, vma);
        if (ret)
                return ret;

        fput(vma->vm_file);
        vma->vm_file = get_file(obj->base.filp);

        return 0;
}

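/* begin/end_cpu_access bracket CPU access by the importer: pull the
 * object into the CPU domain before the access and push it back out to
 * the (coherent) GTT domain afterwards.
 */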
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
        bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
        int err;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                return err;

        err = i915_mutex_lock_interruptible(dev);
        if (err)
                goto out;

        err = i915_gem_object_set_to_cpu_domain(obj, write);
        mutex_unlock(&dev->struct_mutex);

out:
        i915_gem_object_unpin_pages(obj);
        return err;
}

static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
        int err;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                return err;

        err = i915_mutex_lock_interruptible(dev);
        if (err)
                goto out;

        err = i915_gem_object_set_to_gtt_domain(obj, false);
        mutex_unlock(&dev->struct_mutex);

out:
        i915_gem_object_unpin_pages(obj);
        return err;
}

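/* dma_buf_ops for buffers exported by i915; release is the generic GEM helper. */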
static const struct dma_buf_ops i915_dmabuf_ops = {
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .map = i915_gem_dmabuf_kmap,
        .unmap = i915_gem_dmabuf_kunmap,
        .mmap = i915_gem_dmabuf_mmap,
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
        .begin_cpu_access = i915_gem_begin_cpu_access,
        .end_cpu_access = i915_gem_end_cpu_access,
};

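/**
 * i915_gem_prime_export - export a GEM object as a dma-buf
 * @dev: drm device the object belongs to
 * @gem_obj: GEM object to export
 * @flags: file flags (such as O_CLOEXEC) for the dma-buf file
 *
 * Called via the PRIME handle-to-fd path. The export reuses the
 * object's own reservation object so that fences are shared between
 * exporter and importers.
 */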
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                      struct drm_gem_object *gem_obj, int flags)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &i915_dmabuf_ops;
        exp_info.size = gem_obj->size;
        exp_info.flags = flags;
        exp_info.priv = gem_obj;
        exp_info.resv = obj->resv;

        if (obj->ops->dmabuf_export) {
                int ret = obj->ops->dmabuf_export(obj);
                if (ret)
                        return ERR_PTR(ret);
        }

        return drm_gem_dmabuf_export(dev, &exp_info);
}

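/* For an imported object, "getting" the backing pages means mapping the
 * foreign dma-buf through our attachment; the exporter supplies the
 * sg_table.
 */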
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        struct sg_table *pages;
        unsigned int sg_page_sizes;

        pages = dma_buf_map_attachment(obj->base.import_attach,
                                       DMA_BIDIRECTIONAL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        sg_page_sizes = i915_sg_page_sizes(pages->sgl);

        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);

        return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
                                             struct sg_table *pages)
{
        dma_buf_unmap_attachment(obj->base.import_attach, pages,
                                 DMA_BIDIRECTIONAL);
}

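/* Backing-store ops for GEM objects that wrap an imported dma-buf. */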
static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
        .get_pages = i915_gem_object_get_pages_dmabuf,
        .put_pages = i915_gem_object_put_pages_dmabuf,
};

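/**
 * i915_gem_prime_import - import a dma-buf as a GEM object
 * @dev: drm device to import into
 * @dma_buf: dma-buf to import
 *
 * Called via the PRIME fd-to-handle path. A dma-buf that we exported
 * ourselves short-circuits back to the original GEM object; any other
 * buffer is wrapped in a new GEM object backed by a dma-buf attachment.
 */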
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
{
        struct dma_buf_attachment *attach;
        struct drm_i915_gem_object *obj;
        int ret;

        /* is this one of our own objects? */
        if (dma_buf->ops == &i915_dmabuf_ops) {
                obj = dma_buf_to_obj(dma_buf);
                /* is it from our device? */
                if (obj->base.dev == dev) {
                        /*
                         * Importing a dmabuf exported from our own GEM
                         * object takes a reference on the GEM object
                         * itself instead of on the f_count of the dmabuf.
                         */
                        return &i915_gem_object_get(obj)->base;
                }
        }

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        obj = i915_gem_object_alloc();
        if (!obj) {
                ret = -ENOMEM;
                goto fail_detach;
        }

        drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
        obj->base.import_attach = attach;
        obj->resv = dma_buf->resv;

        /* We use GTT as shorthand for a coherent domain, one that is
         * neither in the GPU cache nor in the CPU cache, where all
         * writes are immediately visible in memory. (That's not strictly
         * true, but it's close! There are internal buffers such as the
         * write-combined buffer or a delay through the chipset for GTT
         * writes that do require us to treat GTT as a separate cache domain.)
         */
        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;

        return &obj->base;

fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif