/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>
#include <linux/module.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

MODULE_IMPORT_NS(DMA_BUF);

I915_SELFTEST_DECLARE(static bool force_different_devices;)

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
        return to_intel_bo(buf->priv);
}

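/*
 * Map the exported object into an importer's DMA address space. The
 * object's sg_table is copied so that each attachment gets an independent
 * mapping; DMA_ATTR_SKIP_CPU_SYNC is passed because CPU coherency is
 * handled explicitly via the begin/end_cpu_access callbacks below.
 */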
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int ret, i;

        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st) {
                ret = -ENOMEM;
                goto err;
        }

        ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
        if (ret)
                goto err_free;

        src = obj->mm.pages->sgl;
        dst = st->sgl;
        for (i = 0; i < obj->mm.pages->nents; i++) {
                sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
        }

        ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (ret)
                goto err_free_sg;

        return st;

err_free_sg:
        sg_free_table(st);
err_free:
        kfree(st);
err:
        return ERR_PTR(ret);
}

static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
{
        dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sg);
        kfree(sg);
}

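/*
 * Provide a kernel virtual mapping of the object for dma_buf_vmap().
 * The pin taken here is dropped again in the vunmap callback, after
 * flushing any CPU writes out of the mapping.
 */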
static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        void *vaddr;

        vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        dma_buf_map_set_vaddr(map, vaddr);

        return 0;
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);
}

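/*
 * mmap is only supported for objects backed by a shmem file: the request
 * is forwarded to that file's own mmap handler, and the vma's file pointer
 * is then swapped so the mapping holds a reference on the object's file
 * rather than the dma-buf file.
 */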
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        int ret;

        if (obj->base.size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!obj->base.filp)
                return -ENODEV;

        ret = call_mmap(obj->base.filp, vma);
        if (ret)
                return ret;

        vma_set_file(vma, obj->base.filp);

        return 0;
}

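/*
 * begin_cpu_access()/end_cpu_access() bracket CPU access by an importer.
 * Beginning access moves the object to the CPU domain (flushing caches as
 * needed); ending it moves the object back to the GTT domain. Both use
 * the standard ww-mutex idiom: on -EDEADLK the lock is backed off and the
 * whole sequence retried.
 */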
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        err = i915_gem_object_lock(obj, &ww);
        if (!err)
                err = i915_gem_object_pin_pages(obj);
        if (!err) {
                err = i915_gem_object_set_to_cpu_domain(obj, write);
                i915_gem_object_unpin_pages(obj);
        }
        if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);
        return err;
}

static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        err = i915_gem_object_lock(obj, &ww);
        if (!err)
                err = i915_gem_object_pin_pages(obj);
        if (!err) {
                err = i915_gem_object_set_to_gtt_domain(obj, false);
                i915_gem_object_unpin_pages(obj);
        }
        if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);
        return err;
}

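/*
 * Attaching an importer requires the object to live in system memory, so
 * objects that cannot be migrated to SMEM are rejected. Otherwise the
 * object is migrated to SMEM and its pages pinned for the lifetime of the
 * attachment (dropped again in detach); for_i915_gem_ww() reruns the body
 * on -EDEADLK.
 */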
static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
                                  struct dma_buf_attachment *attach)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
        struct i915_gem_ww_ctx ww;
        int err;

        if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
                return -EOPNOTSUPP;

        for_i915_gem_ww(&ww, err, true) {
                err = i915_gem_object_lock(obj, &ww);
                if (err)
                        continue;

                err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
                if (err)
                        continue;

                err = i915_gem_object_wait_migration(obj, 0);
                if (err)
                        continue;

                err = i915_gem_object_pin_pages(obj);
        }

        return err;
}

static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
                                   struct dma_buf_attachment *attach)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);

        i915_gem_object_unpin_pages(obj);
}

static const struct dma_buf_ops i915_dmabuf_ops = {
        .attach = i915_gem_dmabuf_attach,
        .detach = i915_gem_dmabuf_detach,
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .mmap = i915_gem_dmabuf_mmap,
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
        .begin_cpu_access = i915_gem_begin_cpu_access,
        .end_cpu_access = i915_gem_end_cpu_access,
};

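/*
 * Export a gem object as a dma-buf. The exported buffer shares the
 * object's reservation object, so fences are visible on both sides, and
 * object types get a chance to prepare or veto the export through their
 * optional dmabuf_export hook.
 */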
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &i915_dmabuf_ops;
        exp_info.size = gem_obj->size;
        exp_info.flags = flags;
        exp_info.priv = gem_obj;
        exp_info.resv = obj->base.resv;

        if (obj->ops->dmabuf_export) {
                int ret = obj->ops->dmabuf_export(obj);

                if (ret)
                        return ERR_PTR(ret);
        }

        return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

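/*
 * get_pages/put_pages backends for imported dma-bufs: the object's pages
 * are whatever sg_table the exporter hands back from
 * dma_buf_map_attachment(). On platforms without LLC (or for objects that
 * may bypass it) CPU caches are flushed first to avoid reading stale data.
 */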
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct sg_table *pages;
        unsigned int sg_page_sizes;

        assert_object_held(obj);

        pages = dma_buf_map_attachment(obj->base.import_attach,
                                       DMA_BIDIRECTIONAL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /* XXX: consider doing a vmap flush or something */
        if (!HAS_LLC(i915) || i915_gem_object_can_bypass_llc(obj))
                wbinvd_on_all_cpus();

        sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);

        return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
                                             struct sg_table *pages)
{
        dma_buf_unmap_attachment(obj->base.import_attach, pages,
                                 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
        .name = "i915_gem_object_dmabuf",
        .get_pages = i915_gem_object_get_pages_dmabuf,
        .put_pages = i915_gem_object_put_pages_dmabuf,
};

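/*
 * Import a dma-buf as a gem object. Buffers that we exported ourselves
 * are short-circuited back to the underlying gem object; anything else is
 * wrapped in a new object whose pages come from the attachment.
 */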
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
{
        static struct lock_class_key lock_class;
        struct dma_buf_attachment *attach;
        struct drm_i915_gem_object *obj;
        int ret;

        /* is this one of our own objects? */
        if (dma_buf->ops == &i915_dmabuf_ops) {
                obj = dma_buf_to_obj(dma_buf);
                /* is it from our device? */
                if (obj->base.dev == dev &&
                    !I915_SELFTEST_ONLY(force_different_devices)) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead
                         * of the f_count of the dmabuf.
                         */
                        return &i915_gem_object_get(obj)->base;
                }
        }

        if (i915_gem_object_size_2big(dma_buf->size))
                return ERR_PTR(-E2BIG);

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        obj = i915_gem_object_alloc();
        if (!obj) {
                ret = -ENOMEM;
                goto fail_detach;
        }

        drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class,
                             I915_BO_ALLOC_USER);
        obj->base.import_attach = attach;
        obj->base.resv = dma_buf->resv;

        /*
         * We use GTT as shorthand for a coherent domain, one that is
         * neither in the GPU cache nor in the CPU cache, where all
         * writes are immediately visible in memory. (That's not strictly
         * true, but it's close! There are internal buffers such as the
         * write-combined buffer or a delay through the chipset for GTT
         * writes that do require us to treat GTT as a separate cache domain.)
         */
        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;

        return &obj->base;

fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif