/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#include "virtgpu_drv.h"
#include "virtgpu_trace.h"

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)
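
/*
 * Virtqueue interrupt callbacks.  These run in atomic context, so they
 * only schedule the per-queue dequeue work; the actual buffer reclaim
 * happens later in process context.
 */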
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}
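
/*
 * The vbuffer cache holds fixed-size allocations: the vbuffer bookkeeping
 * structure itself plus room for an inline command (MAX_INLINE_CMD_SIZE)
 * and an inline response (MAX_INLINE_RESP_SIZE).
 */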
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}
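
/*
 * Allocate a vbuffer from the cache.  Commands always live inline, right
 * after the vbuffer struct; responses up to MAX_INLINE_RESP_SIZE share the
 * same allocation, while larger responses use the caller-supplied resp_buf.
 */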
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kvfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}
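
/*
 * Pull all completed buffers off a virtqueue and collect them on
 * reclaim_list so the caller can free them outside the queue lock.
 */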
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}
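
/*
 * Control-queue dequeue work.  The disable_cb/enable_cb loop drains the
 * queue without losing a race against the host adding more completions;
 * virtqueue_enable_cb() returns false if buffers arrived in the meantime.
 * Fenced responses carry the highest completed fence id, which is handed
 * to virtio_gpu_fence_event_process() after the responses are handled.
 */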
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);

	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry(entry, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;

		trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);

		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
				DRM_ERROR("response 0x%x (command 0x%x)\n",
					  le32_to_cpu(resp->type),
					  le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		if (entry->objs)
			virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

/* Create sg_table from a vmalloc'd buffer. */
static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
{
	int ret, s, i;
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct page *pg;

	if (WARN_ON(!PAGE_ALIGNED(data)))
		return NULL;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	*sg_ents = DIV_ROUND_UP(size, PAGE_SIZE);
	ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL);
	if (ret) {
		kfree(sgt);
		return NULL;
	}

	for_each_sg(sgt->sgl, sg, *sg_ents, i) {
		pg = vmalloc_to_page(data);
		if (!pg) {
			sg_free_table(sgt);
			kfree(sgt);
			return NULL;
		}

		s = min_t(int, PAGE_SIZE, size);
		sg_set_page(sg, pg, s, 0);

		size -= s;
		data += s;
	}

	return sgt;
}
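
/*
 * Add a command (and optional data/response buffers) to the control ring.
 * Called with ctrlq.qlock held; drops and re-takes the lock while waiting
 * for ring space, which is what the __releases/__acquires annotations
 * document for sparse.
 */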
static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct scatterlist *vout)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vresp;
	int outcnt = 0, incnt = 0;
	bool notify = false;
	int ret;

	if (!vgdev->vqs_ready)
		return notify;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vout) {
		sgs[outcnt + incnt] = vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		notify = virtqueue_kick_prepare(vq);
	}
	return notify;
}
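
/*
 * Queue a command with an optional fence.  The fence id must be emitted
 * under the same lock that inserts the buffer into the ring, otherwise
 * two commands could reach the ring with their fence ids swapped.
 */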
static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
						struct virtio_gpu_vbuffer *vbuf,
						struct virtio_gpu_ctrl_hdr *hdr,
						struct virtio_gpu_fence *fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *vout = NULL, sg;
	struct sg_table *sgt = NULL;
	bool notify;
	int outcnt = 0;

	if (vbuf->data_size) {
		if (is_vmalloc_addr(vbuf->data_buf)) {
			sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
					     &outcnt);
			if (!sgt)
				return;
			vout = sgt->sgl;
		} else {
			sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
			vout = &sg;
			outcnt = 1;
		}
	}

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue.  If not,
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 2 + outcnt) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (hdr && fence) {
		virtio_gpu_fence_emit(vgdev, hdr, fence);
		if (vbuf->objs) {
			virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
			virtio_gpu_array_unlock_resv(vbuf->objs);
		}
	}
	notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
	spin_unlock(&vgdev->ctrlq.qlock);
	if (notify)
		virtqueue_notify(vgdev->ctrlq.vq);

	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
}

static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					 struct virtio_gpu_vbuffer *vbuf)
{
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
}
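
/*
 * The cursor queue is separate from the control queue, so cursor position
 * updates need not wait behind long-running control commands.
 */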
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	bool notify;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		trace_virtio_gpu_cmd_queue(vq,
			(struct virtio_gpu_ctrl_hdr *)vbuf->buf);

		notify = virtqueue_kick_prepare(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (notify)
		virtqueue_notify(vq);
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_object_array *objs,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
						  uint32_t resource_id,
						  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
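
/*
 * Transfer guest memory into the host-side resource.  When the backing
 * pages were mapped through the DMA API (no IOMMU bypass quirk), the sg
 * list must be synced for the device before the host reads it.
 */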
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					uint64_t offset,
					uint32_t width, uint32_t height,
					uint32_t x, uint32_t y,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
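
/*
 * Response callbacks.  These run from the control-queue dequeue work and
 * copy data out of the (still queue-owned) response buffer before waking
 * any waiters.
 */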
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			/* Copy must occur before is_valid is signalled. */
			smp_wmb();
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up_all(&vgdev->resp_wq);
}
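
/*
 * get_edid_block() callback for drm_do_get_edid(): copy one EDID block
 * out of the response buffer that the host filled in.
 */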
static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	struct virtio_gpu_drv_cap_cache *search_ent;
	void *resp_buf;

	*cache_p = NULL;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	/* Search while under lock in case it was added by another task. */
	list_for_each_entry(search_ent, &vgdev->cap_cache, head) {
		if (search_ent->id == vgdev->capsets[idx].id &&
		    search_ent->version == version) {
			*cache_p = search_ent;
			break;
		}
	}
	if (!*cache_p)
		list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	if (*cache_p) {
		/* Entry was found, so free everything that was just created. */
		kfree(resp_buf);
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return 0;
	}

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    struct virtio_gpu_object_array *objs)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_object_array *objs,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));
	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_object_array *objs,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_object_array *objs,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->objs = objs;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id,
			   struct virtio_gpu_object_array *objs,
			   struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	vbuf->objs = objs;
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
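
/*
 * Pin the GEM object's pages, map them for DMA if the transport needs it,
 * and hand the resulting address/length list to the host with an
 * ATTACH_BACKING command.  The entry array is freed by free_vbuf() once
 * the ring has consumed the command.
 */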
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_fence *fence)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si, nents, ret;

	if (WARN_ON_ONCE(!obj->created))
		return -EINVAL;
	if (WARN_ON_ONCE(obj->pages))
		return -EINVAL;

	ret = drm_gem_shmem_pin(&obj->base.base);
	if (ret < 0)
		return -EINVAL;

	obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
	if (obj->pages == NULL) {
		drm_gem_shmem_unpin(&obj->base.base);
		return -EINVAL;
	}

	if (use_dma_api) {
		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					 obj->pages->sgl, obj->pages->nents,
					 DMA_TO_DEVICE);
		nents = obj->mapped;
	} else {
		nents = obj->pages->nents;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, nents, si) {
		ents[si].addr = cpu_to_le64(use_dma_api
					    ? sg_dma_address(sg)
					    : sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents,
					       fence);
	return 0;
}
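
/*
 * Detach the backing store.  When the pages were DMA-mapped the host must
 * be done with them before they can be unmapped, so wait on a fence here;
 * otherwise the detach can be fire-and-forget.
 */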
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (WARN_ON_ONCE(!obj->pages))
		return;

	if (use_dma_api && obj->mapped) {
		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
		/* detach backing and wait for the host to process it ... */
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
		dma_fence_wait(&fence->f, true);
		dma_fence_put(&fence->f);

		/* ... then tear down iommu mappings */
		dma_unmap_sg(vgdev->vdev->dev.parent,
			     obj->pages->sgl, obj->mapped,
			     DMA_TO_DEVICE);
		obj->mapped = 0;
	} else {
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
	}

	sg_free_table(obj->pages);
	obj->pages = NULL;

	drm_gem_shmem_unpin(&obj->base.base);
}

void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}