/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>

#include "virtgpu_drv.h"

static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
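
/*
 * Resource IDs handed to the host are 1-based; handle 0 is never used.
 * With the virglrenderer workaround enabled, IDs come from a monotonically
 * increasing sequence number and are never recycled; otherwise they are
 * recycled through the device's IDA (shifted down by one on free).
 */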
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
				      uint32_t *resid)
{
	if (virtio_gpu_virglrenderer_workaround) {
		/*
		 * Hack to avoid re-using resource IDs.
		 *
		 * virglrenderer versions up to (and including) 0.7.0
		 * can't deal with that.  virglrenderer commit
		 * "f91a9dd35715 Fix unlinking resources from hash
		 * table." (Feb 2019) fixes the bug.
		 */
		static atomic_t seqno = ATOMIC_INIT(0);
		int handle = atomic_inc_return(&seqno);
		*resid = handle + 1;
	} else {
		int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);

		if (handle < 0)
			return handle;
		*resid = handle + 1;
	}
	return 0;
}

static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id)
{
	if (!virtio_gpu_virglrenderer_workaround) {
		ida_free(&vgdev->resource_ida, id - 1);
	}
}
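
/*
 * Final teardown of a BO: release its resource ID and, for shmem-backed
 * objects, undo the DMA mapping, free the sg table, unpin the pages, and
 * free the GEM object.  Runs either directly from virtio_gpu_free_object()
 * or from the virtqueue completion handler once the host has dropped the
 * resource.
 */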
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
	if (virtio_gpu_is_shmem(bo)) {
		struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

		if (shmem->pages) {
			if (shmem->mapped) {
				dma_unmap_sg(vgdev->vdev->dev.parent,
					     shmem->pages->sgl, shmem->mapped,
					     DMA_TO_DEVICE);
				shmem->mapped = 0;
			}

			sg_free_table(shmem->pages);
			shmem->pages = NULL;

			drm_gem_shmem_unpin(&bo->base.base);
		}

		drm_gem_shmem_free_object(&bo->base.base);
	}
}
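
/*
 * If the resource exists on the host, queue an unref command; that
 * command's completion handler then finishes the cleanup.  Objects that
 * were never created host-side can be torn down right away.
 */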
static void virtio_gpu_free_object(struct drm_gem_object *obj)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;

	if (bo->created) {
		virtio_gpu_cmd_unref_resource(vgdev, bo);
		virtio_gpu_notify(vgdev);
		/* completion handler calls virtio_gpu_cleanup_object() */
		return;
	}
	virtio_gpu_cleanup_object(bo);
}
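
/*
 * Everything except object lifecycle (free/open/close) is delegated to
 * the generic GEM shmem helpers.
 */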
static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
	.free = virtio_gpu_free_object,
	.open = virtio_gpu_gem_object_open,
	.close = virtio_gpu_gem_object_close,

	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};
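
/* A BO is shmem-backed iff it uses the shmem function table above. */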
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
{
	return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
}
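
/*
 * gem_create_object hook: allocate the driver-private BO wrapper and
 * install the shmem function table; drm_gem_shmem_create() initializes
 * the rest of the GEM object.
 */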
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
						size_t size)
{
	struct virtio_gpu_object_shmem *shmem;
	struct drm_gem_shmem_object *dshmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;

	dshmem = &shmem->base.base;
	dshmem->base.funcs = &virtio_gpu_shmem_funcs;
	dshmem->map_cached = true;
	return &dshmem->base;
}
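
/*
 * Pin the BO's pages, map them for DMA when the transport needs real
 * DMA addresses, and translate the resulting scatter/gather list into
 * the virtio_gpu_mem_entry array that is handed to the host.
 */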
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					struct virtio_gpu_mem_entry **ents,
					unsigned int *nents)
{
	bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
	struct scatterlist *sg;
	int si, ret;

	ret = drm_gem_shmem_pin(&bo->base.base);
	if (ret < 0)
		return -EINVAL;

	shmem->pages = drm_gem_shmem_get_pages_sgt(&bo->base.base);
	if (!shmem->pages) {
		drm_gem_shmem_unpin(&bo->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					   shmem->pages->sgl,
					   shmem->pages->nents,
					   DMA_TO_DEVICE);
		*nents = shmem->mapped;
	} else {
		*nents = shmem->pages->nents;
	}

	*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
			      GFP_KERNEL);
	if (!(*ents)) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(shmem->pages->sgl, sg, *nents, si) {
		(*ents)[si].addr = cpu_to_le64(use_dma_api
					       ? sg_dma_address(sg)
					       : sg_phys(sg));
		(*ents)[si].length = cpu_to_le32(sg->length);
		(*ents)[si].padding = 0;
	}
	return 0;
}
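
/*
 * Create a new BO: allocate the GEM object and a resource ID, tell the
 * host to create the resource (3D or 2D depending on params->virgl),
 * then attach the backing pages.  When a fence is supplied, the create
 * command is fenced against the BO's reservation object.
 */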
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object_params *params,
			     struct virtio_gpu_object **bo_ptr,
			     struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object_array *objs = NULL;
	struct drm_gem_shmem_object *shmem_obj;
	struct virtio_gpu_object *bo;
	struct virtio_gpu_mem_entry *ents;
	unsigned int nents;
	int ret;

	*bo_ptr = NULL;

	params->size = roundup(params->size, PAGE_SIZE);
	shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size);
	if (IS_ERR(shmem_obj))
		return PTR_ERR(shmem_obj);
	bo = gem_to_virtio_gpu_obj(&shmem_obj->base);

	ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle);
	if (ret < 0)
		goto err_free_gem;

	bo->dumb = params->dumb;

	if (fence) {
		ret = -ENOMEM;
		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			goto err_put_id;
		virtio_gpu_array_add_obj(objs, &bo->base.base);

		ret = virtio_gpu_array_lock_resv(objs);
		if (ret != 0)
			goto err_put_objs;
	}

	if (params->virgl) {
		virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
						  objs, fence);
	} else {
		virtio_gpu_cmd_create_resource(vgdev, bo, params,
					       objs, fence);
	}

	ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
	if (ret != 0) {
		virtio_gpu_free_object(&shmem_obj->base);
		return ret;
	}

	virtio_gpu_object_attach(vgdev, bo, ents, nents);

	*bo_ptr = bo;
	return 0;

err_put_objs:
	virtio_gpu_array_put_free(objs);
err_put_id:
	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
err_free_gem:
	drm_gem_shmem_free_object(&shmem_obj->base);
	return ret;
}