/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie
 *    Alon Levy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"

#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
                                    VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
                                    VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)

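/*
 * Reserve a VIRTGPU_EVENT_FENCE_SIGNALED event for @fence, but only if
 * userspace opted in to polling @ring_idx via the per-file ring_idx_mask
 * (see VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK).  Returns 0 without doing
 * anything for rings that are not polled.
 */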
static int virtio_gpu_fence_event_create(struct drm_device *dev,
                                         struct drm_file *file,
                                         struct virtio_gpu_fence *fence,
                                         uint32_t ring_idx)
{
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct virtio_gpu_fence_event *e = NULL;
        int ret;

        if (!(vfpriv->ring_idx_mask & BIT_ULL(ring_idx)))
                return 0;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
        e->event.length = sizeof(e->event);

        ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
        if (ret)
                goto free;

        fence->e = e;
        return 0;
free:
        kfree(e);
        return ret;
}

/* Must be called with &virtio_gpu_fpriv.context_lock held. */
static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
                                             struct virtio_gpu_fpriv *vfpriv)
{
        char dbgname[TASK_COMM_LEN];

        get_task_comm(dbgname, current);
        virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
                                      vfpriv->context_init, strlen(dbgname),
                                      dbgname);

        vfpriv->context_created = true;
}

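/*
 * Create the host-side rendering context for @file on first use.  Safe to
 * call repeatedly; the context is only created the first time.
 */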
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;

        mutex_lock(&vfpriv->context_lock);
        if (vfpriv->context_created)
                goto out_unlock;

        virtio_gpu_create_context_locked(vgdev, vfpriv);

out_unlock:
        mutex_unlock(&vfpriv->context_lock);
}

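/*
 * DRM_IOCTL_VIRTGPU_MAP: look up the mmap offset for a dumb buffer handle
 * so userspace can mmap() it through the DRM file descriptor.
 */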
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_map *virtio_gpu_map = data;

        return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
                                         virtio_gpu_map->handle,
                                         &virtio_gpu_map->offset);
}

/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes).
 */
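/*
 * The submission path: optionally wait on an incoming sync-file fence,
 * look up the referenced BOs and lock their reservations, copy the command
 * stream from userspace, then queue it with an out-fence that can be
 * exported as a sync-file and/or delivered as a poll event on the chosen
 * timeline ring.
 */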
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file)
{
        struct drm_virtgpu_execbuffer *exbuf = data;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct virtio_gpu_fence *out_fence;
        int ret;
        uint32_t *bo_handles = NULL;
        void __user *user_bo_handles = NULL;
        struct virtio_gpu_object_array *buflist = NULL;
        struct sync_file *sync_file;
        int out_fence_fd = -1;
        void *buf;
        uint64_t fence_ctx;
        uint32_t ring_idx;

        fence_ctx = vgdev->fence_drv.context;
        ring_idx = 0;

        if (!vgdev->has_virgl_3d)
                return -ENOSYS;

        if (exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)
                return -EINVAL;

        if (exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) {
                if (exbuf->ring_idx >= vfpriv->num_rings)
                        return -EINVAL;

                if (!vfpriv->base_fence_ctx)
                        return -EINVAL;

                fence_ctx = vfpriv->base_fence_ctx;
                ring_idx = exbuf->ring_idx;
        }

        virtio_gpu_create_context(dev, file);
        if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
                struct dma_fence *in_fence;

                in_fence = sync_file_get_fence(exbuf->fence_fd);

                if (!in_fence)
                        return -EINVAL;

                /*
                 * Wait if the fence is from a foreign context, or if the fence
                 * array contains any fence from a foreign context.
                 */
                ret = 0;
                if (!dma_fence_match_context(in_fence, fence_ctx + ring_idx))
                        ret = dma_fence_wait(in_fence, true);

                dma_fence_put(in_fence);
                if (ret)
                        return ret;
        }

        if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
                out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
                if (out_fence_fd < 0)
                        return out_fence_fd;
        }

        if (exbuf->num_bo_handles) {
                bo_handles = kvmalloc_array(exbuf->num_bo_handles,
                                            sizeof(uint32_t), GFP_KERNEL);
                if (!bo_handles) {
                        ret = -ENOMEM;
                        goto out_unused_fd;
                }

                user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
                if (copy_from_user(bo_handles, user_bo_handles,
                                   exbuf->num_bo_handles * sizeof(uint32_t))) {
                        ret = -EFAULT;
                        goto out_unused_fd;
                }

                buflist = virtio_gpu_array_from_handles(file, bo_handles,
                                                        exbuf->num_bo_handles);
                if (!buflist) {
                        ret = -ENOENT;
                        goto out_unused_fd;
                }
                kvfree(bo_handles);
                bo_handles = NULL;
        }

        buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
        if (IS_ERR(buf)) {
                ret = PTR_ERR(buf);
                goto out_unused_fd;
        }

        if (buflist) {
                ret = virtio_gpu_array_lock_resv(buflist);
                if (ret)
                        goto out_memdup;
        }

        out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
        if (!out_fence) {
                ret = -ENOMEM;
                goto out_unresv;
        }

        ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
        if (ret) {
                /* drop the reference taken by virtio_gpu_fence_alloc() */
                dma_fence_put(&out_fence->f);
                goto out_unresv;
        }

        if (out_fence_fd >= 0) {
                sync_file = sync_file_create(&out_fence->f);
                if (!sync_file) {
                        dma_fence_put(&out_fence->f);
                        ret = -ENOMEM;
                        goto out_unresv;
                }

                exbuf->fence_fd = out_fence_fd;
                fd_install(out_fence_fd, sync_file->file);
        }

        virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
                              vfpriv->ctx_id, buflist, out_fence);
        dma_fence_put(&out_fence->f);
        virtio_gpu_notify(vgdev);
        return 0;

out_unresv:
        if (buflist)
                virtio_gpu_array_unlock_resv(buflist);
out_memdup:
        kvfree(buf);
out_unused_fd:
        kvfree(bo_handles);
        if (buflist)
                virtio_gpu_array_put_free(buflist);

        if (out_fence_fd >= 0)
                put_unused_fd(out_fence_fd);

        return ret;
}

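/*
 * DRM_IOCTL_VIRTGPU_GETPARAM: report one device capability (3D support,
 * blob resources, context init, ...) to userspace per call.
 *
 * A minimal userspace sketch (hypothetical fd, error handling omitted):
 *
 *   __u64 cap = 0;
 *   struct drm_virtgpu_getparam gp = {
 *           .param = VIRTGPU_PARAM_3D_FEATURES,
 *           .value = (uintptr_t)&cap,
 *   };
 *   ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp);
 */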
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_getparam *param = data;
        int value;

        switch (param->param) {
        case VIRTGPU_PARAM_3D_FEATURES:
                value = vgdev->has_virgl_3d ? 1 : 0;
                break;
        case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
                value = 1;
                break;
        case VIRTGPU_PARAM_RESOURCE_BLOB:
                value = vgdev->has_resource_blob ? 1 : 0;
                break;
        case VIRTGPU_PARAM_HOST_VISIBLE:
                value = vgdev->has_host_visible ? 1 : 0;
                break;
        case VIRTGPU_PARAM_CROSS_DEVICE:
                value = vgdev->has_resource_assign_uuid ? 1 : 0;
                break;
        case VIRTGPU_PARAM_CONTEXT_INIT:
                value = vgdev->has_context_init ? 1 : 0;
                break;
        case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
                value = vgdev->capset_id_mask;
                break;
        default:
                return -EINVAL;
        }
        if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
                return -EFAULT;

        return 0;
}

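/*
 * DRM_IOCTL_VIRTGPU_RESOURCE_CREATE: allocate a (possibly 3D) resource on
 * the host and a matching GEM object in the guest, fenced so the host has
 * created the resource before it is first used.
 */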
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
                                            struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_resource_create *rc = data;
        struct virtio_gpu_fence *fence;
        int ret;
        struct virtio_gpu_object *qobj;
        struct drm_gem_object *obj;
        uint32_t handle = 0;
        struct virtio_gpu_object_params params = { 0 };

        if (vgdev->has_virgl_3d) {
                virtio_gpu_create_context(dev, file);
                params.virgl = true;
                params.target = rc->target;
                params.bind = rc->bind;
                params.depth = rc->depth;
                params.array_size = rc->array_size;
                params.last_level = rc->last_level;
                params.nr_samples = rc->nr_samples;
                params.flags = rc->flags;
        } else {
                /* without virgl, only simple 2D resources can be created */
                if (rc->depth > 1)
                        return -EINVAL;
                if (rc->nr_samples > 1)
                        return -EINVAL;
                if (rc->last_level > 1)
                        return -EINVAL;
                if (rc->target != 2)
                        return -EINVAL;
                if (rc->array_size > 1)
                        return -EINVAL;
        }

        params.format = rc->format;
        params.width = rc->width;
        params.height = rc->height;
        params.size = rc->size;
        /* allocate a single page sized object */
        if (params.size == 0)
                params.size = PAGE_SIZE;

        fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
        if (!fence)
                return -ENOMEM;
        ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
        dma_fence_put(&fence->f);
        if (ret < 0)
                return ret;
        obj = &qobj->base.base;

        ret = drm_gem_handle_create(file, obj, &handle);
        if (ret) {
                drm_gem_object_release(obj);
                return ret;
        }

        rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
        rc->bo_handle = handle;

        /*
         * The handle owns the reference now.  But we must drop our
         * remaining reference *after* we no longer need to dereference
         * the obj.  Otherwise userspace could guess the handle and
         * race closing it from another thread.
         */
        drm_gem_object_put(obj);

        return 0;
}

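/*
 * DRM_IOCTL_VIRTGPU_RESOURCE_INFO: report the size, host resource handle
 * and, for blob resources, the blob memory type of a GEM object.
 */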
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
                                          struct drm_file *file)
{
        struct drm_virtgpu_resource_info *ri = data;
        struct drm_gem_object *gobj = NULL;
        struct virtio_gpu_object *qobj = NULL;

        gobj = drm_gem_object_lookup(file, ri->bo_handle);
        if (gobj == NULL)
                return -ENOENT;

        qobj = gem_to_virtio_gpu_obj(gobj);

        ri->size = qobj->base.base.size;
        ri->res_handle = qobj->hw_res_handle;
        if (qobj->host3d_blob || qobj->guest_blob)
                ri->blob_mem = qobj->blob_mem;

        drm_gem_object_put(gobj);
        return 0;
}

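/*
 * DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST: copy data from the host-side
 * resource into guest memory (3D only), fenced against the transfer.
 */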
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
                                               void *data,
                                               struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_from_host *args = data;
        struct virtio_gpu_object *bo;
        struct virtio_gpu_object_array *objs;
        struct virtio_gpu_fence *fence;
        int ret;
        u32 offset = args->offset;

        if (!vgdev->has_virgl_3d)
                return -ENOSYS;

        virtio_gpu_create_context(dev, file);
        objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
        if (objs == NULL)
                return -ENOENT;

        bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        if (bo->guest_blob && !bo->host3d_blob) {
                ret = -EINVAL;
                goto err_put_free;
        }

        if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
                ret = -EINVAL;
                goto err_put_free;
        }

        ret = virtio_gpu_array_lock_resv(objs);
        if (ret != 0)
                goto err_put_free;

        fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
        if (!fence) {
                ret = -ENOMEM;
                goto err_unlock;
        }

        virtio_gpu_cmd_transfer_from_host_3d
                (vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
                 args->layer_stride, &args->box, objs, fence);
        dma_fence_put(&fence->f);
        virtio_gpu_notify(vgdev);
        return 0;

err_unlock:
        virtio_gpu_array_unlock_resv(objs);
err_put_free:
        virtio_gpu_array_put_free(objs);
        return ret;
}

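/*
 * DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST: copy data from guest memory into the
 * host-side resource; 2D transfers go unfenced, 3D transfers are fenced.
 */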
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
                                             struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_3d_transfer_to_host *args = data;
        struct virtio_gpu_object *bo;
        struct virtio_gpu_object_array *objs;
        struct virtio_gpu_fence *fence;
        int ret;
        u32 offset = args->offset;

        objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
        if (objs == NULL)
                return -ENOENT;

        bo = gem_to_virtio_gpu_obj(objs->objs[0]);
        if (bo->guest_blob && !bo->host3d_blob) {
                ret = -EINVAL;
                goto err_put_free;
        }

        if (!vgdev->has_virgl_3d) {
                virtio_gpu_cmd_transfer_to_host_2d
                        (vgdev, offset,
                         args->box.w, args->box.h, args->box.x, args->box.y,
                         objs, NULL);
        } else {
                virtio_gpu_create_context(dev, file);

                if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
                        ret = -EINVAL;
                        goto err_put_free;
                }

                ret = virtio_gpu_array_lock_resv(objs);
                if (ret != 0)
                        goto err_put_free;

                ret = -ENOMEM;
                fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
                                               0);
                if (!fence)
                        goto err_unlock;

                virtio_gpu_cmd_transfer_to_host_3d
                        (vgdev,
                         vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
                         args->stride, args->layer_stride, &args->box, objs,
                         fence);
                dma_fence_put(&fence->f);
        }
        virtio_gpu_notify(vgdev);
        return 0;

err_unlock:
        virtio_gpu_array_unlock_resv(objs);
err_put_free:
        virtio_gpu_array_put_free(objs);
        return ret;
}

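/*
 * DRM_IOCTL_VIRTGPU_WAIT: wait (up to 15 seconds) for all fences attached
 * to a GEM object to signal, or just poll them with VIRTGPU_WAIT_NOWAIT.
 */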
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file)
{
        struct drm_virtgpu_3d_wait *args = data;
        struct drm_gem_object *obj;
        long timeout = 15 * HZ;
        int ret;

        obj = drm_gem_object_lookup(file, args->handle);
        if (obj == NULL)
                return -ENOENT;

        if (args->flags & VIRTGPU_WAIT_NOWAIT) {
                ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
        } else {
                ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ,
                                            true, timeout);
        }
        if (ret == 0)
                ret = -EBUSY;
        else if (ret > 0)
                ret = 0;

        drm_gem_object_put(obj);
        return ret;
}

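/*
 * DRM_IOCTL_VIRTGPU_GET_CAPS: fetch a capability set from the host,
 * caching the reply so later queries for the same id/version are cheap.
 */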
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
                                void *data, struct drm_file *file)
{
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct drm_virtgpu_get_caps *args = data;
        unsigned int size, host_caps_size;
        int i;
        int found_valid = -1;
        int ret;
        struct virtio_gpu_drv_cap_cache *cache_ent;
        void *ptr;

        if (vgdev->num_capsets == 0)
                return -ENOSYS;

        /* don't allow userspace to pass 0 */
        if (args->size == 0)
                return -EINVAL;

        spin_lock(&vgdev->display_info_lock);
        for (i = 0; i < vgdev->num_capsets; i++) {
                if (vgdev->capsets[i].id == args->cap_set_id) {
                        if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
                                found_valid = i;
                                break;
                        }
                }
        }

        if (found_valid == -1) {
                spin_unlock(&vgdev->display_info_lock);
                return -EINVAL;
        }

        host_caps_size = vgdev->capsets[found_valid].max_size;
        /* copy at most the smaller of the host and guest caps sizes to user */
        size = min(args->size, host_caps_size);

        list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
                if (cache_ent->id == args->cap_set_id &&
                    cache_ent->version == args->cap_set_ver) {
                        spin_unlock(&vgdev->display_info_lock);
                        goto copy_exit;
                }
        }
        spin_unlock(&vgdev->display_info_lock);

        /* not in cache - need to talk to hw */
        ret = virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
                                        &cache_ent);
        if (ret)
                return ret;
        virtio_gpu_notify(vgdev);

copy_exit:
        ret = wait_event_timeout(vgdev->resp_wq,
                                 atomic_read(&cache_ent->is_valid), 5 * HZ);
        if (!ret)
                return -EBUSY;

        /* the is_valid check must precede the copy of the cache entry */
        smp_rmb();

        ptr = cache_ent->caps_cache;

        if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
                return -EFAULT;

        return 0;
}

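/*
 * Validate a RESOURCE_CREATE_BLOB request against the device's features
 * and fill in the object creation parameters.  Sets *guest_blob and
 * *host3d_blob according to the requested blob memory type.
 */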
static int verify_blob(struct virtio_gpu_device *vgdev,
                       struct virtio_gpu_fpriv *vfpriv,
                       struct virtio_gpu_object_params *params,
                       struct drm_virtgpu_resource_create_blob *rc_blob,
                       bool *guest_blob, bool *host3d_blob)
{
        if (!vgdev->has_resource_blob)
                return -EINVAL;

        if (rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK)
                return -EINVAL;

        if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
                if (!vgdev->has_resource_assign_uuid)
                        return -EINVAL;
        }

        switch (rc_blob->blob_mem) {
        case VIRTGPU_BLOB_MEM_GUEST:
                *guest_blob = true;
                break;
        case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
                *guest_blob = true;
                fallthrough;
        case VIRTGPU_BLOB_MEM_HOST3D:
                *host3d_blob = true;
                break;
        default:
                return -EINVAL;
        }

        if (*host3d_blob) {
                if (!vgdev->has_virgl_3d)
                        return -EINVAL;

                /* Must be dword aligned. */
                if (rc_blob->cmd_size % 4 != 0)
                        return -EINVAL;

                params->ctx_id = vfpriv->ctx_id;
                params->blob_id = rc_blob->blob_id;
        } else {
                if (rc_blob->blob_id != 0)
                        return -EINVAL;

                if (rc_blob->cmd_size != 0)
                        return -EINVAL;
        }

        params->blob_mem = rc_blob->blob_mem;
        params->size = rc_blob->size;
        params->blob = true;
        params->blob_flags = rc_blob->blob_flags;
        return 0;
}

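/*
 * DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB: create a blob resource backed by
 * guest pages, host memory, or both, optionally submitting an initial
 * command buffer and assigning a UUID for cross-device sharing.
 */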
static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
                                                 void *data,
                                                 struct drm_file *file)
{
        int ret = 0;
        uint32_t handle = 0;
        bool guest_blob = false;
        bool host3d_blob = false;
        struct drm_gem_object *obj;
        struct virtio_gpu_object *bo;
        struct virtio_gpu_object_params params = { 0 };
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_resource_create_blob *rc_blob = data;

        if (verify_blob(vgdev, vfpriv, &params, rc_blob,
                        &guest_blob, &host3d_blob))
                return -EINVAL;

        if (vgdev->has_virgl_3d)
                virtio_gpu_create_context(dev, file);

        if (rc_blob->cmd_size) {
                void *buf;

                buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
                                  rc_blob->cmd_size);

                if (IS_ERR(buf))
                        return PTR_ERR(buf);

                virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
                                      vfpriv->ctx_id, NULL, NULL);
        }

        if (guest_blob)
                ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
        else if (!guest_blob && host3d_blob)
                ret = virtio_gpu_vram_create(vgdev, &params, &bo);
        else
                return -EINVAL;

        if (ret < 0)
                return ret;

        bo->guest_blob = guest_blob;
        bo->host3d_blob = host3d_blob;
        bo->blob_mem = rc_blob->blob_mem;
        bo->blob_flags = rc_blob->blob_flags;

        obj = &bo->base.base;
        if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
                ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
                if (ret) {
                        drm_gem_object_release(obj);
                        return ret;
                }
        }

        ret = drm_gem_handle_create(file, obj, &handle);
        if (ret) {
                drm_gem_object_release(obj);
                return ret;
        }

        rc_blob->res_handle = bo->hw_res_handle;
        rc_blob->bo_handle = handle;

        /*
         * The handle owns the reference now.  But we must drop our
         * remaining reference *after* we no longer need to dereference
         * the obj.  Otherwise userspace could guess the handle and
         * race closing it from another thread.
         */
        drm_gem_object_put(obj);

        return 0;
}

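/*
 * DRM_IOCTL_VIRTGPU_CONTEXT_INIT: let userspace pick the capset, the number
 * of fence timeline rings, and the set of rings to poll before the host
 * context is created.  Fails with -EEXIST once the context exists.
 *
 * A minimal userspace sketch (hypothetical values, error handling omitted):
 *
 *   struct drm_virtgpu_context_set_param params[] = {
 *           { VIRTGPU_CONTEXT_PARAM_CAPSET_ID, 1 },
 *           { VIRTGPU_CONTEXT_PARAM_NUM_RINGS, 2 },
 *   };
 *   struct drm_virtgpu_context_init init = {
 *           .num_params = 2,
 *           .ctx_set_params = (uintptr_t)params,
 *   };
 *   ioctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
 */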
static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
                                         void *data, struct drm_file *file)
{
        int ret = 0;
        uint32_t num_params, i, param, value;
        uint64_t valid_ring_mask;
        size_t len;
        struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
        struct virtio_gpu_device *vgdev = dev->dev_private;
        struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
        struct drm_virtgpu_context_init *args = data;

        num_params = args->num_params;
        len = num_params * sizeof(struct drm_virtgpu_context_set_param);

        if (!vgdev->has_context_init || !vgdev->has_virgl_3d)
                return -EINVAL;

        /* Number of unique parameters supported at this time. */
        if (num_params > 3)
                return -EINVAL;

        ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
                                     len);

        if (IS_ERR(ctx_set_params))
                return PTR_ERR(ctx_set_params);

        mutex_lock(&vfpriv->context_lock);
        if (vfpriv->context_created) {
                ret = -EEXIST;
                goto out_unlock;
        }

        for (i = 0; i < num_params; i++) {
                param = ctx_set_params[i].param;
                value = ctx_set_params[i].value;

                switch (param) {
                case VIRTGPU_CONTEXT_PARAM_CAPSET_ID:
                        if (value > MAX_CAPSET_ID) {
                                ret = -EINVAL;
                                goto out_unlock;
                        }

                        if ((vgdev->capset_id_mask & (1ULL << value)) == 0) {
                                ret = -EINVAL;
                                goto out_unlock;
                        }

                        /* Context capset ID already set */
                        if (vfpriv->context_init &
                            VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) {
                                ret = -EINVAL;
                                goto out_unlock;
                        }

                        vfpriv->context_init |= value;
                        break;
                case VIRTGPU_CONTEXT_PARAM_NUM_RINGS:
                        if (vfpriv->base_fence_ctx) {
                                ret = -EINVAL;
                                goto out_unlock;
                        }

                        if (value > MAX_RINGS) {
                                ret = -EINVAL;
                                goto out_unlock;
                        }

                        vfpriv->base_fence_ctx = dma_fence_context_alloc(value);
                        vfpriv->num_rings = value;
                        break;
                case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK:
                        if (vfpriv->ring_idx_mask) {
                                ret = -EINVAL;
                                goto out_unlock;
                        }

                        vfpriv->ring_idx_mask = value;
                        break;
                default:
                        ret = -EINVAL;
                        goto out_unlock;
                }
        }

        if (vfpriv->ring_idx_mask) {
                valid_ring_mask = 0;
                for (i = 0; i < vfpriv->num_rings; i++)
                        valid_ring_mask |= 1ULL << i;

                if (~valid_ring_mask & vfpriv->ring_idx_mask) {
                        ret = -EINVAL;
                        goto out_unlock;
                }
        }

        virtio_gpu_create_context_locked(vgdev, vfpriv);
        virtio_gpu_notify(vgdev);

out_unlock:
        mutex_unlock(&vfpriv->context_lock);
        kfree(ctx_set_params);
        return ret;
}

struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
        DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
                          virtio_gpu_resource_create_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
                          DRM_RENDER_ALLOW),

        /* make transfer async to the main ring? - not sure, can we
         * thread these in the underlying GL
         */
        DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
                          virtio_gpu_transfer_from_host_ioctl,
                          DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
                          virtio_gpu_transfer_to_host_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
                          virtio_gpu_resource_create_blob_ioctl,
                          DRM_RENDER_ALLOW),

        DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
                          DRM_RENDER_ALLOW),
};