/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/file.h>
#include <linux/sync_file.h>
#include <linux/uaccess.h>

#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>

#include "virtgpu_drv.h"
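/*
 * Lazily create the per-file host rendering context.  The context_lock
 * mutex makes this safe against concurrent ioctls: only the first caller
 * actually issues the context-create command, tagged with the current
 * task's comm as a debug name for the host side.
 */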
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	char dbgname[TASK_COMM_LEN];

	mutex_lock(&vfpriv->context_lock);
	if (vfpriv->context_created)
		goto out_unlock;

	get_task_comm(dbgname, current);
	virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
				      strlen(dbgname), dbgname);
	vfpriv->context_created = true;

out_unlock:
	mutex_unlock(&vfpriv->context_lock);
}
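/*
 * VIRTGPU_MAP: look up the dumb buffer behind a GEM handle and return the
 * fake mmap offset userspace needs to map it with mmap(2).
 */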
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_map *virtio_gpu_map = data;

	return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
					 virtio_gpu_map->handle,
					 &virtio_gpu_map->offset);
}
/*
 * Usage of execbuffer:
 * Relocations need to take into account the full VIRTIO_GPUDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial VIRTIO_GPUReleaseInfo struct (first XXX bytes).
 */
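/*
 * The submission can be fenced in both directions: with
 * VIRTGPU_EXECBUF_FENCE_FD_IN the caller passes a sync_file fd that is
 * waited on (if from a foreign context) before the commands are queued,
 * and with VIRTGPU_EXECBUF_FENCE_FD_OUT a sync_file fd is returned in
 * fence_fd that signals once the host has processed the command buffer.
 */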
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file)
{
	struct drm_virtgpu_execbuffer *exbuf = data;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_fence *out_fence;
	int ret;
	uint32_t *bo_handles = NULL;
	void __user *user_bo_handles = NULL;
	struct virtio_gpu_object_array *buflist = NULL;
	struct sync_file *sync_file;
	int in_fence_fd = exbuf->fence_fd;
	int out_fence_fd = -1;
	void *buf;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
		return -EINVAL;

	exbuf->fence_fd = -1;

	virtio_gpu_create_context(dev, file);
	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
		struct dma_fence *in_fence;

		in_fence = sync_file_get_fence(in_fence_fd);
		if (!in_fence)
			return -EINVAL;

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		ret = 0;
		if (!dma_fence_match_context(in_fence, vgdev->fence_drv.context))
			ret = dma_fence_wait(in_fence, true);

		dma_fence_put(in_fence);
		if (ret)
			return ret;
	}

	if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0)
			return out_fence_fd;
	}

	if (exbuf->num_bo_handles) {
		bo_handles = kvmalloc_array(exbuf->num_bo_handles,
					    sizeof(uint32_t), GFP_KERNEL);
		if (!bo_handles) {
			ret = -ENOMEM;
			goto out_unused_fd;
		}

		user_bo_handles = u64_to_user_ptr(exbuf->bo_handles);
		if (copy_from_user(bo_handles, user_bo_handles,
				   exbuf->num_bo_handles * sizeof(uint32_t))) {
			ret = -EFAULT;
			goto out_unused_fd;
		}

		buflist = virtio_gpu_array_from_handles(file, bo_handles,
							exbuf->num_bo_handles);
		if (!buflist) {
			ret = -ENOENT;
			goto out_unused_fd;
		}
		kvfree(bo_handles);
		bo_handles = NULL;
	}

	buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
	if (IS_ERR(buf)) {
		ret = PTR_ERR(buf);
		goto out_unused_fd;
	}

	if (buflist) {
		ret = virtio_gpu_array_lock_resv(buflist);
		if (ret)
			goto out_memdup;
	}

	out_fence = virtio_gpu_fence_alloc(vgdev);
	if (!out_fence) {
		ret = -ENOMEM;
		goto out_unresv;
	}

	if (out_fence_fd >= 0) {
		sync_file = sync_file_create(&out_fence->f);
		if (!sync_file) {
			dma_fence_put(&out_fence->f);
			ret = -ENOMEM;
			goto out_unresv;
		}

		exbuf->fence_fd = out_fence_fd;
		fd_install(out_fence_fd, sync_file->file);
	}

	virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
			      vfpriv->ctx_id, buflist, out_fence);
	virtio_gpu_notify(vgdev);
	return 0;

out_unresv:
	if (buflist)
		virtio_gpu_array_unlock_resv(buflist);
out_memdup:
	kvfree(buf);
out_unused_fd:
	kvfree(bo_handles);
	if (buflist)
		virtio_gpu_array_put_free(buflist);

	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}
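/*
 * VIRTGPU_GETPARAM: let userspace query driver features; each parameter
 * is reported as a single int written back through the user pointer in
 * the args struct.
 */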
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_getparam *param = data;
	int value;

	switch (param->param) {
	case VIRTGPU_PARAM_3D_FEATURES:
		value = vgdev->has_virgl_3d ? 1 : 0;
		break;
	case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
		value = 1;
		break;
	default:
		return -EINVAL;
	}
	if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int)))
		return -EFAULT;

	return 0;
}
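/*
 * VIRTGPU_RESOURCE_CREATE: create a host resource backed by a guest GEM
 * object.  On virgl-capable hosts the full set of 3D texture parameters
 * is forwarded; 2D-only hosts reject anything beyond a plain 2D resource.
 */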
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
					    struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_resource_create *rc = data;
	struct virtio_gpu_fence *fence;
	int ret;
	struct virtio_gpu_object *qobj;
	struct drm_gem_object *obj;
	uint32_t handle = 0;
	struct virtio_gpu_object_params params = { 0 };

	if (vgdev->has_virgl_3d) {
		virtio_gpu_create_context(dev, file);
		params.virgl = true;
		params.target = rc->target;
		params.bind = rc->bind;
		params.depth = rc->depth;
		params.array_size = rc->array_size;
		params.last_level = rc->last_level;
		params.nr_samples = rc->nr_samples;
		params.flags = rc->flags;
	} else {
		/* 2D-only host: reject parameters that require virgl */
		if (rc->depth > 1)
			return -EINVAL;
		if (rc->nr_samples > 1)
			return -EINVAL;
		if (rc->last_level > 1)
			return -EINVAL;
		if (rc->target != 2)
			return -EINVAL;
		if (rc->array_size > 1)
			return -EINVAL;
	}

	params.format = rc->format;
	params.width = rc->width;
	params.height = rc->height;
	params.size = rc->size;
	/* allocate a single page size object */
	if (params.size == 0)
		params.size = PAGE_SIZE;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence)
		return -ENOMEM;
	ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
	dma_fence_put(&fence->f);
	if (ret < 0)
		return ret;
	obj = &qobj->base.base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret) {
		drm_gem_object_release(obj);
		return ret;
	}
	drm_gem_object_put_unlocked(obj);

	rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
	rc->bo_handle = handle;
	return 0;
}
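/*
 * VIRTGPU_RESOURCE_INFO: report the size and host resource handle of the
 * buffer object behind a GEM handle.
 */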
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file)
{
	struct drm_virtgpu_resource_info *ri = data;
	struct drm_gem_object *gobj = NULL;
	struct virtio_gpu_object *qobj = NULL;

	gobj = drm_gem_object_lookup(file, ri->bo_handle);
	if (gobj == NULL)
		return -ENOENT;

	qobj = gem_to_virtio_gpu_obj(gobj);

	ri->size = qobj->base.base.size;
	ri->res_handle = qobj->hw_res_handle;
	drm_gem_object_put_unlocked(gobj);
	return 0;
}
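/*
 * VIRTGPU_TRANSFER_FROM_HOST: copy a box of a host resource back into the
 * guest buffer object.  Only available with virgl; completion is tracked
 * through a fence attached to the object's reservation.
 */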
static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
					       void *data,
					       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_from_host *args = data;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	if (!vgdev->has_virgl_3d)
		return -ENOSYS;

	virtio_gpu_create_context(dev, file);
	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	ret = virtio_gpu_array_lock_resv(objs);
	if (ret != 0)
		goto err_put_free;

	fence = virtio_gpu_fence_alloc(vgdev);
	if (!fence) {
		ret = -ENOMEM;
		goto err_unlock;
	}

	virtio_gpu_cmd_transfer_from_host_3d
		(vgdev, vfpriv->ctx_id, offset, args->level,
		 &args->box, objs, fence);
	dma_fence_put(&fence->f);
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}
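/*
 * VIRTGPU_TRANSFER_TO_HOST: copy guest buffer contents to the host
 * resource.  2D hosts take an unfenced x/y/w/h blit; virgl hosts get the
 * full level/box form with a fence tracking completion.
 */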
static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct drm_virtgpu_3d_transfer_to_host *args = data;
	struct virtio_gpu_object_array *objs;
	struct virtio_gpu_fence *fence;
	int ret;
	u32 offset = args->offset;

	objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
	if (objs == NULL)
		return -ENOENT;

	if (!vgdev->has_virgl_3d) {
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, offset,
			 args->box.w, args->box.h, args->box.x, args->box.y,
			 objs, NULL);
	} else {
		virtio_gpu_create_context(dev, file);
		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_free;

		ret = -ENOMEM;
		fence = virtio_gpu_fence_alloc(vgdev);
		if (!fence)
			goto err_unlock;

		virtio_gpu_cmd_transfer_to_host_3d
			(vgdev,
			 vfpriv ? vfpriv->ctx_id : 0, offset,
			 args->level, &args->box, objs, fence);
		dma_fence_put(&fence->f);
	}
	virtio_gpu_notify(vgdev);
	return 0;

err_unlock:
	virtio_gpu_array_unlock_resv(objs);
err_put_free:
	virtio_gpu_array_put_free(objs);
	return ret;
}
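/*
 * VIRTGPU_WAIT: wait (up to 15 seconds) for all fences on a buffer object
 * to signal, or with VIRTGPU_WAIT_NOWAIT just poll and return -EBUSY if
 * the object is still busy.
 */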
static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file)
{
	struct drm_virtgpu_3d_wait *args = data;
	struct drm_gem_object *obj;
	long timeout = 15 * HZ;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (obj == NULL)
		return -ENOENT;

	if (args->flags & VIRTGPU_WAIT_NOWAIT) {
		ret = dma_resv_test_signaled_rcu(obj->resv, true);
	} else {
		ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
						timeout);
	}
	if (ret == 0)
		ret = -EBUSY;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put_unlocked(obj);
	return ret;
}
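/*
 * VIRTGPU_GET_CAPS: fetch a capability set from the host.  Results are
 * cached per id/version under display_info_lock; on a cache miss a capset
 * query is sent to the host and the caller sleeps until the response
 * arrives (5 second timeout).
 */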
static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
				     void *data, struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct drm_virtgpu_get_caps *args = data;
	unsigned size, host_caps_size;
	int i;
	int found_valid = -1;
	int ret;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *ptr;

	if (vgdev->num_capsets == 0)
		return -ENOSYS;

	/* don't allow userspace to pass 0 */
	if (args->size == 0)
		return -EINVAL;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_capsets; i++) {
		if (vgdev->capsets[i].id == args->cap_set_id) {
			if (vgdev->capsets[i].max_version >= args->cap_set_ver) {
				found_valid = i;
				break;
			}
		}
	}

	if (found_valid == -1) {
		spin_unlock(&vgdev->display_info_lock);
		return -EINVAL;
	}

	host_caps_size = vgdev->capsets[found_valid].max_size;
	/* only copy to user the minimum of the host caps size or the guest caps size */
	size = min(args->size, host_caps_size);

	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->id == args->cap_set_id &&
		    cache_ent->version == args->cap_set_ver) {
			spin_unlock(&vgdev->display_info_lock);
			goto copy_exit;
		}
	}
	spin_unlock(&vgdev->display_info_lock);

	/* not in cache - need to talk to hw */
	virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
				  &cache_ent);
	virtio_gpu_notify(vgdev);

copy_exit:
	ret = wait_event_timeout(vgdev->resp_wq,
				 atomic_read(&cache_ent->is_valid), 5 * HZ);
	if (!ret)
		return -EBUSY;

	/* is_valid check must precede copy of the cache entry. */
	smp_rmb();

	ptr = cache_ent->caps_cache;

	if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size))
		return -EFAULT;

	return 0;
}
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
	DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE,
			  virtio_gpu_resource_create_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl,
			  DRM_RENDER_ALLOW),

	/* make transfer async to the main ring? - not sure, can we
	 * thread these in the underlying GL
	 */
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST,
			  virtio_gpu_transfer_from_host_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST,
			  virtio_gpu_transfer_to_host_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl,
			  DRM_RENDER_ALLOW),

	DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
			  DRM_RENDER_ALLOW),
};