/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>

#include <drm/drm_file.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

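/*
 * virtio is a little-endian transport: convert each field of the box the
 * user passed in to the wire format before it is embedded in a command.
 */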
static void convert_to_hw_box(struct virtio_gpu_box *dst,
			      const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}

static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}
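
/*
 * Example (hypothetical userspace sketch, not part of this driver): a
 * caller maps a buffer object by asking the kernel for a fake mmap offset
 * and then passing it to mmap(2).  Assumes an open virtio-gpu render node
 * "fd", a GEM handle "bo_handle" of size "bo_size", and libdrm's drmIoctl():
 *
 *	struct drm_virtgpu_map map = { .handle = bo_handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_MAP, &map) == 0)
 *		ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, map.offset);
 */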
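/*
 * The next two helpers implement the usual TTM execbuffer pattern:
 * reserve all buffers on the list under a single ww_acquire_ctx ticket
 * (so that multi-object locking cannot deadlock), validate each object
 * against its placement, and back off every reservation if any single
 * validation fails.  virtio_gpu_unref_list() drops the GEM references
 * taken when the list was built.
 */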
int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
				    struct list_head *head)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;
	int ret;

	ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
	if (ret != 0)
		return ret;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);
		ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
		if (ret) {
			ttm_eu_backoff_reservation(ticket, head);
			return ret;
		}
	}
	return 0;
}

void virtio_gpu_unref_list(struct list_head *head)
{
	struct ttm_validate_buffer *buf;
	struct ttm_buffer_object *bo;
	struct virtio_gpu_object *qobj;

	list_for_each_entry(buf, head, head) {
		bo = buf->bo;
		qobj = container_of(bo, struct virtio_gpu_object, tbo);

		drm_gem_object_put_unlocked(&qobj->gem_base);
	}
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes).
 */
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *drm_file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
	struct drm_gem_object *gobj;
	struct virtio_gpu_fence *out_fence;
	struct virtio_gpu_object *qobj;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct list_head validate_list;
	struct ttm_validate_buffer *buflist = NULL;
	int i;
	struct ww_acquire_ctx ticket;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
		return -EINVAL;

	exbuf->fence_fd = -1;

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);

		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	INIT_LIST_HEAD(&validate_list);
	if (exbuf->num_bo_handles) {

		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		buflist = kvmalloc_array(exbuf->num_bo_handles,
					 sizeof(struct ttm_validate_buffer),
					 GFP_KERNEL | __GFP_ZERO);
		if (!bo_handles || !buflist) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		for (i = 0; i < exbuf->num_bo_handles; i++) {
			gobj = drm_gem_object_lookup(drm_file, bo_handles[i]);
			if (!gobj) {
				ret = -ENOENT;
				goto out_unused_fd;
			}

			qobj = gem_to_virtio_gpu_obj(gobj);
			buflist[i].bo = &qobj->tbo;

			list_add(&buflist[i].head, &validate_list);
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
	if (ret)
		goto out_free;

	buf = memdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unresv;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_memdup;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_memdup;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, out_fence);

	/* fence the command bo */
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f);

	virtio_gpu_unref_list(&validate_list);
	kvfree(buflist);
	return 0;

out_memdup:
	kfree(buf);
out_unresv:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_free:
	virtio_gpu_unref_list(&validate_list);
out_unused_fd:
	kvfree(bo_handles);
	kvfree(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}
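
/*
 * Example (hypothetical userspace sketch, not part of this driver):
 * submitting a command buffer and requesting an out-fence fd.  "cmds",
 * "cmds_size", "handles" and "num_handles" are assumptions supplied by
 * the caller; the returned fence_fd can be waited on with libsync's
 * sync_wait():
 *
 *	struct drm_virtgpu_execbuffer eb = {
 *		.flags          = VIRTGPU_EXECBUF_FENCE_FD_OUT,
 *		.size           = cmds_size,
 *		.command        = (uintptr_t)cmds,
 *		.bo_handles     = (uintptr_t)handles,
 *		.num_bo_handles = num_handles,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb) == 0)
 *		sync_wait(eb.fence_fd, -1);
 */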

static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}
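
/*
 * Example (hypothetical userspace sketch): probing for 3D support before
 * touching any of the virgl-only ioctls.  Note the kernel writes a plain
 * int through the user pointer stored in .value:
 *
 *	int has_3d = 0;
 *	struct drm_virtgpu_getparam gp = {
 *		.param = VIRTGPU_PARAM_3D_FEATURES,
 *		.value = (uintptr_t)&has_3d,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) == 0 && has_3d)
 *		...use the 3D paths...
 */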

static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file_priv)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (!vgdev->has_virgl_3d) {
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	if (vgdev->has_virgl_3d) {
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	}
	/* allocate a single page sized object if no size was given */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;
	qobj = virtio_gpu_alloc_object(dev, &params, fence);
	dma_fence_put(&fence->f);
	if (IS_ERR(qobj))
		return PTR_ERR(qobj);
	obj = &qobj->gem_base;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put_unlocked(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}
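
/*
 * Example (hypothetical userspace sketch): creating a 64x64 2D resource
 * on a non-virgl device.  target == 2 (a 2D texture) is the only target
 * accepted without virgl, and the format value is an assumption taken
 * from the virtio-gpu spec (VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM == 1):
 *
 *	struct drm_virtgpu_resource_create rc = {
 *		.target = 2,
 *		.format = 1,
 *		.width  = 64,
 *		.height = 64,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &rc) == 0)
 *		...rc.bo_handle is the GEM handle,
 *		   rc.res_handle the host resource id...
 */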

static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->gem_base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_put_unlocked(gobj);
	return 0;
}
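
/*
 * Userspace sketch (hypothetical): looking up the host resource id for a
 * GEM handle, e.g. to reference the resource from a command stream:
 *
 *	struct drm_virtgpu_resource_info ri = { .bo_handle = bo_handle };
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &ri) == 0)
 *		...use ri.res_handle and ri.size...
 */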

static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct ttm_operation_ctx ctx = { true, false };
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;
	struct virtio_gpu_box box;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unres;
	}
	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, qobj->hw_res_handle,
		 vfpriv->ctx_id, offset, args->level,
		 &box, fence);
	dma_resv_add_excl_fence(qobj->tbo.base.resv, &fence->f);

	dma_fence_put(&fence->f);
out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_put_unlocked(gobj);
	return ret;
}

static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct ttm_operation_ctx ctx = { true, false };
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	struct virtio_gpu_fence *fence;
	struct virtio_gpu_box box;
	int ret;
	u32 offset = args->offset;

	gobj = drm_gem_object_lookup(file, args->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ret = virtio_gpu_object_reserve(qobj, false);
	if (ret)
		goto out;

	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
	if (unlikely(ret))
		goto out_unres;

	convert_to_hw_box(&box, &args->box);
	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, qobj, offset,
			 box.w, box.h, box.x, box.y, NULL);
	} else {
		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence) {
			ret = -ENOMEM;
			goto out_unres;
		}
		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev, qobj,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &box, fence);
		dma_resv_add_excl_fence(qobj->tbo.base.resv, &fence->f);
		dma_fence_put(&fence->f);
	}

out_unres:
	virtio_gpu_object_unreserve(qobj);
out:
	drm_gem_object_put_unlocked(gobj);
	return ret;
}
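
/*
 * Userspace sketch (hypothetical): pushing the guest copy of a resource
 * to the host after writing into the mapped buffer.  On a 2D device only
 * offset and box.{x,y,w,h} are used; level and the z/d fields matter on
 * the virgl path:
 *
 *	struct drm_virtgpu_3d_transfer_to_host xfer = {
 *		.bo_handle = bo_handle,
 *		.box = { .w = 64, .h = 64, .d = 1 },
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &xfer);
 */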

static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;
	int ret;
	bool nowait = false;

	gobj = drm_gem_object_lookup(file, args->handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	if (args->flags & VIRTGPU_WAIT_NOWAIT)
		nowait = true;
	ret = virtio_gpu_object_wait(qobj, nowait);

	drm_gem_object_put_unlocked(gobj);
	return ret;
}
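
/*
 * Userspace sketch (hypothetical): testing whether a buffer is still busy
 * with VIRTGPU_WAIT_NOWAIT (a busy object reports -EBUSY), then falling
 * back to a blocking wait:
 *
 *	struct drm_virtgpu_3d_wait wait = {
 *		.handle = bo_handle,
 *		.flags  = VIRTGPU_WAIT_NOWAIT,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_VIRTGPU_WAIT, &wait) < 0 && errno == EBUSY) {
 *		wait.flags = 0;
 *		drmIoctl(fd, DRM_IOCTL_VIRTGPU_WAIT, &wait);
 *	}
 */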

static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned int size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy back the smaller of the host caps size and the
	 * size the guest asked for */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* the is_valid check must precede the copy of the cache entry. */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}
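
/*
 * Userspace sketch (hypothetical): fetching capset id 1 ("virgl" in the
 * virtio-gpu spec), version 1.  The 4 KiB buffer size is an assumption;
 * the kernel copies back at most min(args->size, host capset size):
 *
 *	char caps[4096];
 *	struct drm_virtgpu_get_caps gc = {
 *		.cap_set_id  = 1,
 *		.cap_set_ver = 1,
 *		.addr        = (uintptr_t)caps,
 *		.size        = sizeof(caps),
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &gc);
 */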

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),
};