/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Xiaoguang Chen
 *    Tina Zhang <tina.zhang@intel.com>
 */

#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <linux/vfio.h>

#include "i915_drv.h"
#include "gvt.h"

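/*
 * A gen8+ GGTT PTE carries the page address in its upper bits while bits
 * 11:0 hold flags; masking the low 12 bits off yields the 4KB-aligned
 * DMA address.
 */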
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))

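/*
 * Build the backing sg_table for a vGPU framebuffer object. The guest
 * framebuffer already lives in graphics memory, so instead of allocating
 * pages this walks the GGTT entries the guest programmed, decodes the DMA
 * address out of each PTE, and records one page per scatterlist entry.
 */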
static int vgpu_gem_get_pages(
                struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct sg_table *st;
        struct scatterlist *sg;
        int i, ret;
        gen8_pte_t __iomem *gtt_entries;
        struct intel_vgpu_fb_info *fb_info;

        fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
        if (WARN_ON(!fb_info))
                return -ENODEV;

        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (unlikely(!st))
                return -ENOMEM;

        ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
        if (ret) {
                kfree(st);
                return ret;
        }
        gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
                (fb_info->start >> PAGE_SHIFT);
        for_each_sg(st->sgl, sg, fb_info->size, i) {
                sg->offset = 0;
                sg->length = PAGE_SIZE;
                sg_dma_address(sg) =
                        GEN8_DECODE_PTE(readq(&gtt_entries[i]));
                sg_dma_len(sg) = PAGE_SIZE;
        }

        __i915_gem_object_set_pages(obj, st, PAGE_SIZE);

        return 0;
}

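/*
 * Undo vgpu_gem_get_pages(): the pages belong to the guest, so only the
 * sg_table bookkeeping is freed here; nothing is unpinned or released.
 */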
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
                struct sg_table *pages)
{
        sg_free_table(pages);
        kfree(pages);
}

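/*
 * kref release callback for an intel_vgpu_dmabuf_obj. If the vGPU is still
 * active, the object is unlinked from the vGPU's dmabuf list and idr before
 * being freed; otherwise it is an orphan and only the memory is released.
 */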
static void dmabuf_gem_object_free(struct kref *kref)
{
        struct intel_vgpu_dmabuf_obj *obj =
                container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
        struct intel_vgpu *vgpu = obj->vgpu;
        struct list_head *pos;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;

        if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
                list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                        dmabuf_obj = container_of(pos,
                                        struct intel_vgpu_dmabuf_obj, list);
                        if (dmabuf_obj == obj) {
                                /* Unlink before freeing: pos points into
                                 * dmabuf_obj, so list_del() must not touch
                                 * freed memory.
                                 */
                                list_del(pos);
                                intel_gvt_hypervisor_put_vfio_device(vgpu);
                                idr_remove(&vgpu->object_idr,
                                           dmabuf_obj->dmabuf_id);
                                kfree(dmabuf_obj->info);
                                kfree(dmabuf_obj);
                                break;
                        }
                }
        } else {
                /* Free the orphan dmabuf_objs here */
                kfree(obj->info);
                kfree(obj);
        }
}

static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
        kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
        kref_put(&obj->kref, dmabuf_gem_object_free);
}

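/*
 * GEM release hook: called when the last reference to the proxy GEM object
 * goes away. Drops this object's reference on the backing dmabuf_obj,
 * taking the vGPU's dmabuf_lock when the vGPU still exists.
 */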
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
        struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
        struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
        struct intel_vgpu *vgpu = obj->vgpu;

        if (vgpu) {
                mutex_lock(&vgpu->dmabuf_lock);
                gem_obj->base.dma_buf = NULL;
                dmabuf_obj_put(obj);
                mutex_unlock(&vgpu->dmabuf_lock);
        } else {
                /* vgpu is NULL, as it has been removed already */
                gem_obj->base.dma_buf = NULL;
                dmabuf_obj_put(obj);
        }
}

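/*
 * The exported object is a proxy: its backing pages are owned by the guest,
 * so i915 must not migrate or otherwise manage them behind our back.
 */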
static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
        .flags = I915_GEM_OBJECT_IS_PROXY,
        .get_pages = vgpu_gem_get_pages,
        .put_pages = vgpu_gem_put_pages,
        .release = vgpu_gem_release,
};

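/*
 * Wrap a decoded guest framebuffer in a private (shmem-less) GEM object so
 * it can be exported as a dma-buf. info->size is in pages, hence the
 * PAGE_SHIFT conversion for the object size.
 */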
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
                struct intel_vgpu_fb_info *info)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_alloc(dev_priv);
        if (obj == NULL)
                return NULL;

        drm_gem_private_object_init(dev, &obj->base,
                info->size << PAGE_SHIFT);
        i915_gem_object_init(obj, &intel_vgpu_gem_ops);

        obj->read_domains = I915_GEM_DOMAIN_GTT;
        obj->write_domain = 0;
        if (IS_SKYLAKE(dev_priv)
                || IS_KABYLAKE(dev_priv)
                || IS_BROXTON(dev_priv)) {
                unsigned int tiling_mode = 0;
                unsigned int stride = 0;

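                /* drm_format_mod stores the PLANE_CTL tiling field (bits
                 * 12:10) already shifted down by 10, so shifting it back up
                 * reconstructs the PLANE_CTL_TILED_* values for comparison.
                 */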
                switch (info->drm_format_mod << 10) {
                case PLANE_CTL_TILED_LINEAR:
                        tiling_mode = I915_TILING_NONE;
                        break;
                case PLANE_CTL_TILED_X:
                        tiling_mode = I915_TILING_X;
                        stride = info->stride;
                        break;
                case PLANE_CTL_TILED_Y:
                        tiling_mode = I915_TILING_Y;
                        stride = info->stride;
                        break;
                default:
                        gvt_dbg_core("unsupported tiling mode\n");
                }
                obj->tiling_and_stride = tiling_mode | stride;
        } else {
                obj->tiling_and_stride = info->drm_format_mod ?
                                        I915_TILING_X : 0;
        }

        return obj;
}

static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
        return c && c->x_hot <= c->width && c->y_hot <= c->height;
}

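/*
 * Decode the vGPU's current primary or cursor plane into @info and sanity
 * check the result: the framebuffer must have a non-zero size, start on a
 * page boundary and fit entirely inside the vGPU's GGTT range.
 */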
static int vgpu_get_plane_info(struct drm_device *dev,
                struct intel_vgpu *vgpu,
                struct intel_vgpu_fb_info *info,
                int plane_id)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_vgpu_primary_plane_format p;
        struct intel_vgpu_cursor_plane_format c;
        int ret;

        if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
                ret = intel_vgpu_decode_primary_plane(vgpu, &p);
                if (ret)
                        return ret;
                info->start = p.base;
                info->start_gpa = p.base_gpa;
                info->width = p.width;
                info->height = p.height;
                info->stride = p.stride;
                info->drm_format = p.drm_format;
                info->drm_format_mod = p.tiled;
                /* p.stride is already in bytes, so the size in pages is
                 * stride * height rounded up to the next page; multiplying
                 * by bpp again would double-count the pixel size.
                 */
                info->size = (p.stride * p.height + PAGE_SIZE - 1)
                                >> PAGE_SHIFT;
        } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
                ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
                if (ret)
                        return ret;
                info->start = c.base;
                info->start_gpa = c.base_gpa;
                info->width = c.width;
                info->height = c.height;
                info->stride = c.width * (c.bpp / 8);
                info->drm_format = c.drm_format;
                info->drm_format_mod = 0;
                info->x_pos = c.x_pos;
                info->y_pos = c.y_pos;

                if (validate_hotspot(&c)) {
                        info->x_hot = c.x_hot;
                        info->y_hot = c.y_hot;
                } else {
                        info->x_hot = UINT_MAX;
                        info->y_hot = UINT_MAX;
                }

                /* info->stride already accounts for bpp */
                info->size = (info->stride * c.height + PAGE_SIZE - 1)
                                >> PAGE_SHIFT;
        } else {
                gvt_vgpu_err("invalid plane id:%d\n", plane_id);
                return -EINVAL;
        }

        if (info->size == 0) {
                gvt_vgpu_err("fb size is zero\n");
                return -EINVAL;
        }

        if (info->start & (PAGE_SIZE - 1)) {
                gvt_vgpu_err("Unaligned fb address:0x%llx\n", info->start);
                return -EFAULT;
        }
        if (((info->start >> PAGE_SHIFT) + info->size) >
                ggtt_total_entries(&dev_priv->ggtt)) {
                gvt_vgpu_err("Invalid GTT offset or size\n");
                return -EFAULT;
        }

        if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
                gvt_vgpu_err("invalid gma addr\n");
                return -EFAULT;
        }

        return 0;
}

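/*
 * Look up an already-exposed dmabuf_obj whose decoded framebuffer matches
 * @latest_info. Caller must hold vgpu->dmabuf_lock.
 */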
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
                    struct intel_vgpu_fb_info *latest_info)
{
        struct list_head *pos;
        struct intel_vgpu_fb_info *fb_info;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
        struct intel_vgpu_dmabuf_obj *ret = NULL;

        list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
                                                list);
                if ((dmabuf_obj == NULL) ||
                    (dmabuf_obj->info == NULL))
                        continue;

                fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
                if ((fb_info->start == latest_info->start) &&
                    (fb_info->start_gpa == latest_info->start_gpa) &&
                    (fb_info->size == latest_info->size) &&
                    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
                    (fb_info->drm_format == latest_info->drm_format) &&
                    (fb_info->width == latest_info->width) &&
                    (fb_info->height == latest_info->height)) {
                        ret = dmabuf_obj;
                        break;
                }
        }

        return ret;
}

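/*
 * Look up a dmabuf_obj by the id handed out in intel_vgpu_query_plane().
 * Caller must hold vgpu->dmabuf_lock.
 */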
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
        struct list_head *pos;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
        struct intel_vgpu_dmabuf_obj *ret = NULL;

        list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
                                                list);
                if (!dmabuf_obj)
                        continue;

                if (dmabuf_obj->dmabuf_id == id) {
                        ret = dmabuf_obj;
                        break;
                }
        }

        return ret;
}

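/* Copy the decoded plane attributes into the VFIO query-plane response. */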
static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
                      struct intel_vgpu_fb_info *fb_info)
{
        gvt_dmabuf->drm_format = fb_info->drm_format;
        gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
        gvt_dmabuf->width = fb_info->width;
        gvt_dmabuf->height = fb_info->height;
        gvt_dmabuf->stride = fb_info->stride;
        gvt_dmabuf->size = fb_info->size;
        gvt_dmabuf->x_pos = fb_info->x_pos;
        gvt_dmabuf->y_pos = fb_info->y_pos;
        gvt_dmabuf->x_hot = fb_info->x_hot;
        gvt_dmabuf->y_hot = fb_info->y_hot;
}

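/*
 * Handle the VFIO_DEVICE_QUERY_GFX_PLANE ioctl: decode the requested plane,
 * reuse a matching dmabuf_obj if one is already exposed, otherwise create a
 * new one and publish its id through gfx_plane_info->dmabuf_id.
 */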
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
        struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
        struct vfio_device_gfx_plane_info *gfx_plane_info = args;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;
        struct intel_vgpu_fb_info fb_info;
        int ret = 0;

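        /* DMABUF + PROBE means userspace is only asking whether the dmabuf
         * path is supported; succeed without touching any plane state. Any
         * combination other than a plain DMABUF request is invalid.
         */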
        if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
                                       VFIO_GFX_PLANE_TYPE_PROBE))
                return ret;
        else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
                        (!gfx_plane_info->flags))
                return -EINVAL;

        ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
                                        gfx_plane_info->drm_plane_type);
        if (ret != 0)
                goto out;

        mutex_lock(&vgpu->dmabuf_lock);
        /* If it exists, pick up the exposed dmabuf_obj */
        dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
        if (dmabuf_obj) {
                update_fb_info(gfx_plane_info, &fb_info);
                gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

                /* This buffer may be released between query_plane ioctl and
                 * get_dmabuf ioctl. Add the refcount to make sure it won't
                 * be released between the two ioctls.
                 */
                if (!dmabuf_obj->initref) {
                        dmabuf_obj->initref = true;
                        dmabuf_obj_get(dmabuf_obj);
                }
                gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
                            vgpu->id, kref_read(&dmabuf_obj->kref),
                            gfx_plane_info->dmabuf_id);
                mutex_unlock(&vgpu->dmabuf_lock);
                goto out;
        }

        mutex_unlock(&vgpu->dmabuf_lock);

        /* Need to allocate a new one */
        dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
        if (unlikely(!dmabuf_obj)) {
                gvt_vgpu_err("alloc dmabuf_obj failed\n");
                ret = -ENOMEM;
                goto out;
        }

        dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
                                   GFP_KERNEL);
        if (unlikely(!dmabuf_obj->info)) {
                gvt_vgpu_err("allocate intel vgpu fb info failed\n");
                ret = -ENOMEM;
                goto out_free_dmabuf;
        }
        memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

        ((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

        dmabuf_obj->vgpu = vgpu;

        ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
        if (ret < 0)
                goto out_free_info;
        gfx_plane_info->dmabuf_id = ret;
        dmabuf_obj->dmabuf_id = ret;

        dmabuf_obj->initref = true;

        kref_init(&dmabuf_obj->kref);

        mutex_lock(&vgpu->dmabuf_lock);
        if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
                gvt_vgpu_err("get vfio device failed\n");
                mutex_unlock(&vgpu->dmabuf_lock);
                /* Drop the now-stale idr entry before freeing the object */
                idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
                goto out_free_info;
        }
        mutex_unlock(&vgpu->dmabuf_lock);

        update_fb_info(gfx_plane_info, &fb_info);

        INIT_LIST_HEAD(&dmabuf_obj->list);
        mutex_lock(&vgpu->dmabuf_lock);
        list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
        mutex_unlock(&vgpu->dmabuf_lock);

        gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
                    __func__, kref_read(&dmabuf_obj->kref), ret);

        return 0;

out_free_info:
        kfree(dmabuf_obj->info);
out_free_dmabuf:
        kfree(dmabuf_obj);
out:
        /* ENODEV means plane isn't ready, which might be a normal case. */
        return (ret == -ENODEV) ? 0 : ret;
}

/* To associate an exposed dmabuf with the dmabuf_obj */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
        struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;
        struct drm_i915_gem_object *obj;
        struct dma_buf *dmabuf;
        int dmabuf_fd;
        int ret = 0;

        mutex_lock(&vgpu->dmabuf_lock);

        dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
        if (dmabuf_obj == NULL) {
                gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
                ret = -EINVAL;
                goto out;
        }

        obj = vgpu_create_gem(dev, dmabuf_obj->info);
        if (obj == NULL) {
                gvt_vgpu_err("create gvt gem obj failed\n");
                ret = -ENOMEM;
                goto out;
        }

        obj->gvt_info = dmabuf_obj->info;

        dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
        if (IS_ERR(dmabuf)) {
                gvt_vgpu_err("export dma-buf failed\n");
                ret = PTR_ERR(dmabuf);
                goto out_free_gem;
        }

        ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
        if (ret < 0) {
                gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
                goto out_free_dmabuf;
        }
        dmabuf_fd = ret;

        dmabuf_obj_get(dmabuf_obj);

        if (dmabuf_obj->initref) {
                dmabuf_obj->initref = false;
                dmabuf_obj_put(dmabuf_obj);
        }

        mutex_unlock(&vgpu->dmabuf_lock);

        gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
                    "        file count: %ld, GEM ref: %d\n",
                    vgpu->id, dmabuf_obj->dmabuf_id,
                    kref_read(&dmabuf_obj->kref),
                    dmabuf_fd,
                    file_count(dmabuf->file),
                    kref_read(&obj->base.refcount));

        /* The exported dma-buf holds its own reference on the GEM object,
         * so the local creation reference is dropped only once the fd
         * exists; dropping it right after export would make the
         * out_free_dmabuf error path put the object twice.
         */
        i915_gem_object_put(obj);

        return dmabuf_fd;

out_free_dmabuf:
        dma_buf_put(dmabuf);
out_free_gem:
        i915_gem_object_put(obj);
out:
        mutex_unlock(&vgpu->dmabuf_lock);
        return ret;
}

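/*
 * Release every dmabuf_obj still tracked for @vgpu at vGPU teardown. Objects
 * whose dma-bufs are still alive in userspace become orphans: their vgpu
 * pointer is cleared here and the final dmabuf_obj_put() frees them later.
 */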
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
        struct list_head *pos, *n;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;

        mutex_lock(&vgpu->dmabuf_lock);
        list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
                dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
                                                list);
                dmabuf_obj->vgpu = NULL;

                idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
                intel_gvt_hypervisor_put_vfio_device(vgpu);
                list_del(pos);

                /* dmabuf_obj might be freed in dmabuf_obj_put */
                if (dmabuf_obj->initref) {
                        dmabuf_obj->initref = false;
                        dmabuf_obj_put(dmabuf_obj);
                }
        }
        mutex_unlock(&vgpu->dmabuf_lock);
}