drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2012 Red Hat Inc
 */

#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <linux/dma-resv.h>
#include <linux/module.h>

#include <asm/smp.h>

#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

MODULE_IMPORT_NS(DMA_BUF);

I915_SELFTEST_DECLARE(static bool force_different_devices;)

static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
{
        return to_intel_bo(buf->priv);
}

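/*
 * Exporter map/unmap callbacks: build an independent copy of the object's
 * sg_table and DMA-map it for the importing device, so the importer's mapping
 * lifetime is decoupled from the object's own backing-store mapping.
 */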
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
                                             enum dma_data_direction dir)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
        struct sg_table *st;
        struct scatterlist *src, *dst;
        int ret, i;

        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (st == NULL) {
                ret = -ENOMEM;
                goto err;
        }

        ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
        if (ret)
                goto err_free;

        src = obj->mm.pages->sgl;
        dst = st->sgl;
        for (i = 0; i < obj->mm.pages->nents; i++) {
                sg_set_page(dst, sg_page(src), src->length, 0);
                dst = sg_next(dst);
                src = sg_next(src);
        }

        ret = dma_map_sgtable(attachment->dev, st, dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (ret)
                goto err_free_sg;

        return st;

err_free_sg:
        sg_free_table(st);
err_free:
        kfree(st);
err:
        return ERR_PTR(ret);
}

static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
{
        dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sg);
        kfree(sg);
}

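/*
 * Exporter vmap/vunmap callbacks: pin the object's pages and hand back a
 * write-back (WB) kernel virtual mapping; vunmap flushes that mapping and
 * drops the pin again.
 */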
static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf,
                                struct iosys_map *map)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        void *vaddr;

        vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        iosys_map_set_vaddr(map, vaddr);

        return 0;
}

static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf,
                                   struct iosys_map *map)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);

        i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);
}

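/*
 * Exporter mmap callback: only shmem-backed objects (those with a valid
 * obj->base.filp) can be mapped. The request is forwarded to the shmem file's
 * own mmap and the vma is then pointed at that file.
 */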
static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        int ret;

        if (obj->base.size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!obj->base.filp)
                return -ENODEV;

        ret = call_mmap(obj->base.filp, vma);
        if (ret)
                return ret;

        vma_set_file(vma, obj->base.filp);

        return 0;
}

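/*
 * CPU access bracketing: begin_cpu_access moves the object into the CPU
 * domain (with write intent for DMA_TO_DEVICE/DMA_BIDIRECTIONAL),
 * end_cpu_access moves it back to the GTT domain. Both take the object lock
 * under a ww context and retry on -EDEADLK via the usual backoff dance.
 */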
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        err = i915_gem_object_lock(obj, &ww);
        if (!err)
                err = i915_gem_object_pin_pages(obj);
        if (!err) {
                err = i915_gem_object_set_to_cpu_domain(obj, write);
                i915_gem_object_unpin_pages(obj);
        }
        if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);
        return err;
}

static int i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        err = i915_gem_object_lock(obj, &ww);
        if (!err)
                err = i915_gem_object_pin_pages(obj);
        if (!err) {
                err = i915_gem_object_set_to_gtt_domain(obj, false);
                i915_gem_object_unpin_pages(obj);
        }
        if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);
        return err;
}

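/*
 * Exporter attach/detach callbacks: an object can only be attached if it can
 * live in system memory, so attach migrates it to INTEL_REGION_SMEM, waits
 * for the migration and pins the pages; detach drops that pin again.
 */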
static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
                                  struct dma_buf_attachment *attach)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
        struct i915_gem_ww_ctx ww;
        int err;

        if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
                return -EOPNOTSUPP;

        for_i915_gem_ww(&ww, err, true) {
                err = i915_gem_object_lock(obj, &ww);
                if (err)
                        continue;

                err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
                if (err)
                        continue;

                err = i915_gem_object_wait_migration(obj, 0);
                if (err)
                        continue;

                err = i915_gem_object_pin_pages(obj);
        }

        return err;
}

static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
                                   struct dma_buf_attachment *attach)
{
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);

        i915_gem_object_unpin_pages(obj);
}

static const struct dma_buf_ops i915_dmabuf_ops = {
        .attach = i915_gem_dmabuf_attach,
        .detach = i915_gem_dmabuf_detach,
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
        .mmap = i915_gem_dmabuf_mmap,
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
        .begin_cpu_access = i915_gem_begin_cpu_access,
        .end_cpu_access = i915_gem_end_cpu_access,
};

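/*
 * i915_gem_prime_export - export a GEM object as a dma-buf.
 * Fills in the export info (ops, size, flags, reservation object) and, where
 * the backend provides a dmabuf_export hook, lets it prepare or refuse the
 * object before handing it to drm_gem_dmabuf_export().
 */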
struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
{
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &i915_dmabuf_ops;
        exp_info.size = gem_obj->size;
        exp_info.flags = flags;
        exp_info.priv = gem_obj;
        exp_info.resv = obj->base.resv;

        if (obj->ops->dmabuf_export) {
                int ret = obj->ops->dmabuf_export(obj);

                if (ret)
                        return ERR_PTR(ret);
        }

        return drm_gem_dmabuf_export(gem_obj->dev, &exp_info);
}

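/*
 * Importer backing-store hooks: get_pages maps the foreign dma-buf through
 * the attachment and installs the resulting sg_table as the object's pages;
 * put_pages unmaps it again. The wbinvd_on_all_cpus() below is the blunt
 * flush used when the object can bypass the LLC or the platform lacks one
 * (DG1 excepted), since the imported sg_table may not point at struct pages
 * we could clflush.
 */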
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct sg_table *pages;
        unsigned int sg_page_sizes;

        assert_object_held(obj);

        pages = dma_buf_map_attachment(obj->base.import_attach,
                                       DMA_BIDIRECTIONAL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        /*
         * DG1 is special here since it still snoops transactions even with
         * CACHE_NONE. This is not the case with other HAS_SNOOP platforms. We
         * might need to revisit this as we add new discrete platforms.
         *
         * XXX: Consider doing a vmap flush or something, where possible.
         * Currently we just do a heavy handed wbinvd_on_all_cpus() here since
         * the underlying sg_table might not even point to struct pages, so we
         * can't just call drm_clflush_sg or similar, like we do elsewhere in
         * the driver.
         */
        if (i915_gem_object_can_bypass_llc(obj) ||
            (!HAS_LLC(i915) && !IS_DG1(i915)))
                wbinvd_on_all_cpus();

        sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);

        return 0;
}

static void i915_gem_object_put_pages_dmabuf(struct drm_i915_gem_object *obj,
                                             struct sg_table *pages)
{
        dma_buf_unmap_attachment(obj->base.import_attach, pages,
                                 DMA_BIDIRECTIONAL);
}

static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = {
        .name = "i915_gem_object_dmabuf",
        .get_pages = i915_gem_object_get_pages_dmabuf,
        .put_pages = i915_gem_object_put_pages_dmabuf,
};

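/*
 * i915_gem_prime_import - import a dma-buf as a GEM object.
 * Re-importing one of our own exports just takes a reference on the
 * underlying GEM object; anything else is wrapped in a new dmabuf-backed
 * object that shares the exporter's reservation object and maps its pages
 * lazily through the get_pages hook above.
 */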
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf)
{
        static struct lock_class_key lock_class;
        struct dma_buf_attachment *attach;
        struct drm_i915_gem_object *obj;
        int ret;

        /* is this one of our own objects? */
        if (dma_buf->ops == &i915_dmabuf_ops) {
                obj = dma_buf_to_obj(dma_buf);
                /* is it from our device? */
                if (obj->base.dev == dev &&
                    !I915_SELFTEST_ONLY(force_different_devices)) {
                        /*
                         * Importing a dmabuf exported from our own gem
                         * increases the refcount on the gem itself instead of
                         * the f_count of the dmabuf.
                         */
                        return &i915_gem_object_get(obj)->base;
                }
        }

        if (i915_gem_object_size_2big(dma_buf->size))
                return ERR_PTR(-E2BIG);

        /* need to attach */
        attach = dma_buf_attach(dma_buf, dev->dev);
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        get_dma_buf(dma_buf);

        obj = i915_gem_object_alloc();
        if (obj == NULL) {
                ret = -ENOMEM;
                goto fail_detach;
        }

        drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class,
                             I915_BO_ALLOC_USER);
        obj->base.import_attach = attach;
        obj->base.resv = dma_buf->resv;

        /*
         * We use GTT as shorthand for a coherent domain, one that is
         * neither in the GPU cache nor in the CPU cache, where all
         * writes are immediately visible in memory. (That's not strictly
         * true, but it's close! There are internal buffers such as the
         * write-combined buffer or a delay through the chipset for GTT
         * writes that do require us to treat GTT as a separate cache domain.)
         */
        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;

        return &obj->base;

fail_detach:
        dma_buf_detach(dma_buf, attach);
        dma_buf_put(dma_buf);

        return ERR_PTR(ret);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_dmabuf.c"
#include "selftests/i915_gem_dmabuf.c"
#endif