2 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
4 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26 * Kevin Tian <kevin.tian@intel.com>
27 * Jike Song <jike.song@intel.com>
28 * Xiaoguang Chen <xiaoguang.chen@intel.com>
31 #include <linux/init.h>
32 #include <linux/device.h>
34 #include <linux/kthread.h>
35 #include <linux/sched/mm.h>
36 #include <linux/types.h>
37 #include <linux/list.h>
38 #include <linux/rbtree.h>
39 #include <linux/spinlock.h>
40 #include <linux/eventfd.h>
41 #include <linux/uuid.h>
42 #include <linux/kvm_host.h>
43 #include <linux/vfio.h>
44 #include <linux/mdev.h>
45 #include <linux/debugfs.h>
47 #include <linux/nospec.h>
52 static const struct intel_gvt_ops *intel_gvt_ops;
54 /* helper macros copied from vfio-pci */
55 #define VFIO_PCI_OFFSET_SHIFT 40
56 #define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT)
57 #define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
58 #define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
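/*
 * With the 40-bit shift, each VFIO region gets its own 1TB window in the
 * device file offset space, e.g.
 * VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_CONFIG_REGION_INDEX) is 7ULL << 40,
 * and (*ppos & VFIO_PCI_OFFSET_MASK) recovers the offset within the
 * selected region.
 */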
60 #define EDID_BLOB_OFFSET (PAGE_SIZE/2)
62 #define OPREGION_SIGNATURE "IntelGraphicsMem"
65 struct intel_vgpu_regops {
66 size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
67 size_t count, loff_t *ppos, bool iswrite);
68 void (*release)(struct intel_vgpu *vgpu,
69 struct vfio_region *region);
77 const struct intel_vgpu_regops *ops;
81 struct vfio_edid_region {
82 struct vfio_region_gfx_edid vfio_edid_regs;
88 struct hlist_node hnode;
91 struct kvmgt_guest_info {
93 struct intel_vgpu *vgpu;
94 struct kvm_page_track_notifier_node track_node;
95 #define NR_BKT (1 << 18)
96 struct hlist_head ptable[NR_BKT];
98 struct dentry *debugfs_cache_entries;
102 struct intel_vgpu *vgpu;
103 struct rb_node gfn_node;
104 struct rb_node dma_addr_node;
112 struct intel_vgpu *vgpu;
113 struct mdev_device *mdev;
114 struct vfio_region *region;
116 struct eventfd_ctx *intx_trigger;
117 struct eventfd_ctx *msi_trigger;
120 * Two caches are used to avoid mapping duplicated pages (e.g.
121 * scratch pages). This helps to reduce DMA setup overhead.
123 struct rb_root gfn_cache;
124 struct rb_root dma_addr_cache;
125 unsigned long nr_cache_entries;
126 struct mutex cache_lock;
128 struct notifier_block iommu_notifier;
129 struct notifier_block group_notifier;
131 struct work_struct release_work;
133 struct vfio_device *vfio_device;
134 struct vfio_group *vfio_group;
137 static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
139 return intel_vgpu_vdev(vgpu);
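/*
 * vgpu->handle holds a pointer to struct kvmgt_guest_info once a guest is
 * attached; any value that fits in the low byte means "no guest attached".
 */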
142 static inline bool handle_valid(unsigned long handle)
144 return !!(handle & ~0xff);
147 static int kvmgt_guest_init(struct mdev_device *mdev);
148 static void intel_vgpu_release_work(struct work_struct *work);
149 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
151 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
154 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
155 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
160 total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
162 for (npage = 0; npage < total_pages; npage++) {
163 unsigned long cur_gfn = gfn + npage;
165 ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
166 drm_WARN_ON(&i915->drm, ret != 1);
170 /* Pin a normal or compound guest page for DMA; compound pages must be backed by contiguous pfns. */
171 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
172 unsigned long size, struct page **page)
174 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
175 unsigned long base_pfn = 0;
180 total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
182 * We pin the pages one by one to avoid allocating a big array
183 * on the stack to hold pfns.
185 for (npage = 0; npage < total_pages; npage++) {
186 unsigned long cur_gfn = gfn + npage;
189 ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
190 IOMMU_READ | IOMMU_WRITE, &pfn);
192 gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
197 if (!pfn_valid(pfn)) {
198 gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
206 else if (base_pfn + npage != pfn) {
207 gvt_vgpu_err("The pages are not continuous\n");
214 *page = pfn_to_page(base_pfn);
217 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
221 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
222 dma_addr_t *dma_addr, unsigned long size)
224 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
225 struct page *page = NULL;
228 ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
232 /* Setup DMA mapping. */
233 *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
234 if (dma_mapping_error(dev, *dma_addr)) {
235 gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
236 page_to_pfn(page), ret);
237 gvt_unpin_guest_page(vgpu, gfn, size);
244 static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
245 dma_addr_t dma_addr, unsigned long size)
247 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
249 dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
250 gvt_unpin_guest_page(vgpu, gfn, size);
253 static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
256 struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
260 itr = rb_entry(node, struct gvt_dma, dma_addr_node);
262 if (dma_addr < itr->dma_addr)
263 node = node->rb_left;
264 else if (dma_addr > itr->dma_addr)
265 node = node->rb_right;
272 static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
274 struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node;
278 itr = rb_entry(node, struct gvt_dma, gfn_node);
281 node = node->rb_left;
282 else if (gfn > itr->gfn)
283 node = node->rb_right;
290 static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
291 dma_addr_t dma_addr, unsigned long size)
293 struct gvt_dma *new, *itr;
294 struct rb_node **link, *parent = NULL;
295 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
297 new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
303 new->dma_addr = dma_addr;
305 kref_init(&new->ref);
307 /* gfn_cache maps gfn to struct gvt_dma. */
308 link = &vdev->gfn_cache.rb_node;
311 itr = rb_entry(parent, struct gvt_dma, gfn_node);
314 link = &parent->rb_left;
316 link = &parent->rb_right;
318 rb_link_node(&new->gfn_node, parent, link);
319 rb_insert_color(&new->gfn_node, &vdev->gfn_cache);
321 /* dma_addr_cache maps dma addr to struct gvt_dma. */
323 link = &vdev->dma_addr_cache.rb_node;
326 itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
328 if (dma_addr < itr->dma_addr)
329 link = &parent->rb_left;
331 link = &parent->rb_right;
333 rb_link_node(&new->dma_addr_node, parent, link);
334 rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache);
336 vdev->nr_cache_entries++;
340 static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
341 struct gvt_dma *entry)
343 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
345 rb_erase(&entry->gfn_node, &vdev->gfn_cache);
346 rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache);
348 vdev->nr_cache_entries--;
351 static void gvt_cache_destroy(struct intel_vgpu *vgpu)
354 struct rb_node *node = NULL;
355 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
358 mutex_lock(&vdev->cache_lock);
359 node = rb_first(&vdev->gfn_cache);
361 mutex_unlock(&vdev->cache_lock);
364 dma = rb_entry(node, struct gvt_dma, gfn_node);
365 gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
366 __gvt_cache_remove_entry(vgpu, dma);
367 mutex_unlock(&vdev->cache_lock);
371 static void gvt_cache_init(struct intel_vgpu *vgpu)
373 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
375 vdev->gfn_cache = RB_ROOT;
376 vdev->dma_addr_cache = RB_ROOT;
377 vdev->nr_cache_entries = 0;
378 mutex_init(&vdev->cache_lock);
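/*
 * The protect table is a hash of write-protected guest frame numbers, used
 * to track pages whose writes must be intercepted for shadow page table
 * maintenance (see kvmgt_page_track_write() below).
 */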
381 static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
383 hash_init(info->ptable);
386 static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
388 struct kvmgt_pgfn *p;
389 struct hlist_node *tmp;
392 hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
398 static struct kvmgt_pgfn *
399 __kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
401 struct kvmgt_pgfn *p, *res = NULL;
403 hash_for_each_possible(info->ptable, p, hnode, gfn) {
413 static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
416 struct kvmgt_pgfn *p;
418 p = __kvmgt_protect_table_find(info, gfn);
422 static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
424 struct kvmgt_pgfn *p;
426 if (kvmgt_gfn_is_write_protected(info, gfn))
429 p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
430 if (WARN(!p, "gfn: 0x%llx\n", gfn))
434 hash_add(info->ptable, &p->hnode, gfn);
437 static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
440 struct kvmgt_pgfn *p;
442 p = __kvmgt_protect_table_find(info, gfn);
449 static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
450 size_t count, loff_t *ppos, bool iswrite)
452 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
453 unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
454 VFIO_PCI_NUM_REGIONS;
455 void *base = vdev->region[i].data;
456 loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
459 if (pos >= vdev->region[i].size || iswrite) {
460 gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
463 count = min(count, (size_t)(vdev->region[i].size - pos));
464 memcpy(buf, base + pos, count);
469 static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
470 struct vfio_region *region)
474 static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
475 .rw = intel_vgpu_reg_rw_opregion,
476 .release = intel_vgpu_reg_release_opregion,
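/*
 * The EDID region is split in two: the vfio_region_gfx_edid control
 * registers live in the first half page and the EDID blob itself starts at
 * EDID_BLOB_OFFSET; intel_vgpu_reg_rw_edid() below routes each access to
 * one half or the other based on the offset.
 */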
479 static int handle_edid_regs(struct intel_vgpu *vgpu,
480 struct vfio_edid_region *region, char *buf,
481 size_t count, u16 offset, bool is_write)
483 struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
486 if (offset + count > sizeof(*regs))
493 data = *((unsigned int *)buf);
495 case offsetof(struct vfio_region_gfx_edid, link_state):
496 if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
497 if (!drm_edid_block_valid(
498 (u8 *)region->edid_blob,
502 gvt_vgpu_err("invalid EDID blob\n");
505 intel_gvt_ops->emulate_hotplug(vgpu, true);
506 } else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN)
507 intel_gvt_ops->emulate_hotplug(vgpu, false);
509 gvt_vgpu_err("invalid EDID link state %d\n",
513 regs->link_state = data;
515 case offsetof(struct vfio_region_gfx_edid, edid_size):
516 if (data > regs->edid_max_size) {
517 gvt_vgpu_err("EDID size is bigger than %d!\n",
518 regs->edid_max_size);
521 regs->edid_size = data;
525 gvt_vgpu_err("write read-only EDID region at offset %d\n",
530 memcpy(buf, (char *)regs + offset, count);
536 static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
537 size_t count, u16 offset, bool is_write)
539 if (offset + count > region->vfio_edid_regs.edid_size)
543 memcpy(region->edid_blob + offset, buf, count);
545 memcpy(buf, region->edid_blob + offset, count);
550 static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
551 size_t count, loff_t *ppos, bool iswrite)
554 unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
555 VFIO_PCI_NUM_REGIONS;
556 struct vfio_edid_region *region =
557 (struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data;
558 loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
560 if (pos < region->vfio_edid_regs.edid_offset) {
561 ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
563 pos -= EDID_BLOB_OFFSET;
564 ret = handle_edid_blob(region, buf, count, pos, iswrite);
568 gvt_vgpu_err("failed to access EDID region\n");
573 static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
574 struct vfio_region *region)
579 static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
580 .rw = intel_vgpu_reg_rw_edid,
581 .release = intel_vgpu_reg_release_edid,
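/*
 * Device-specific regions (OpRegion, EDID) are appended after the standard
 * VFIO PCI regions; the array is grown with krealloc() and addressed as
 * VFIO_PCI_NUM_REGIONS + n in the region info and rw paths.
 */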
584 static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
585 unsigned int type, unsigned int subtype,
586 const struct intel_vgpu_regops *ops,
587 size_t size, u32 flags, void *data)
589 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
590 struct vfio_region *region;
592 region = krealloc(vdev->region,
593 (vdev->num_regions + 1) * sizeof(*region),
598 vdev->region = region;
599 vdev->region[vdev->num_regions].type = type;
600 vdev->region[vdev->num_regions].subtype = subtype;
601 vdev->region[vdev->num_regions].ops = ops;
602 vdev->region[vdev->num_regions].size = size;
603 vdev->region[vdev->num_regions].flags = flags;
604 vdev->region[vdev->num_regions].data = data;
609 static int kvmgt_get_vfio_device(void *p_vgpu)
611 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
612 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
614 vdev->vfio_device = vfio_device_get_from_dev(
615 mdev_dev(vdev->mdev));
616 if (!vdev->vfio_device) {
617 gvt_vgpu_err("failed to get vfio device\n");
624 static int kvmgt_set_opregion(void *p_vgpu)
626 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
630 /* Each vgpu has its own opregion, although VFIO will create another
631 * one later. This one is used to expose the opregion to VFIO, while
632 * the one VFIO creates later is the one the guest actually uses.
634 base = vgpu_opregion(vgpu)->va;
638 if (memcmp(base, OPREGION_SIGNATURE, 16)) {
643 ret = intel_vgpu_register_reg(vgpu,
644 PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
645 VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
646 &intel_vgpu_regops_opregion, OPREGION_SIZE,
647 VFIO_REGION_INFO_FLAG_READ, base);
652 static int kvmgt_set_edid(void *p_vgpu, int port_num)
654 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
655 struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
656 struct vfio_edid_region *base;
659 base = kzalloc(sizeof(*base), GFP_KERNEL);
663 /* TODO: Add multi-port and EDID extension block support */
664 base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
665 base->vfio_edid_regs.edid_max_size = EDID_SIZE;
666 base->vfio_edid_regs.edid_size = EDID_SIZE;
667 base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
668 base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
669 base->edid_blob = port->edid->edid_block;
671 ret = intel_vgpu_register_reg(vgpu,
672 VFIO_REGION_TYPE_GFX,
673 VFIO_REGION_SUBTYPE_GFX_EDID,
674 &intel_vgpu_regops_edid, EDID_SIZE,
675 VFIO_REGION_INFO_FLAG_READ |
676 VFIO_REGION_INFO_FLAG_WRITE |
677 VFIO_REGION_INFO_FLAG_CAPS, base);
682 static void kvmgt_put_vfio_device(void *vgpu)
684 struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);
686 if (WARN_ON(!vdev->vfio_device))
689 vfio_device_put(vdev->vfio_device);
692 static int intel_vgpu_create(struct mdev_device *mdev)
694 struct intel_vgpu *vgpu = NULL;
695 struct intel_vgpu_type *type;
700 pdev = mdev_parent_dev(mdev);
701 gvt = kdev_to_i915(pdev)->gvt;
703 type = intel_gvt_ops->gvt_find_vgpu_type(gvt,
704 mdev_get_type_group_id(mdev));
710 vgpu = intel_gvt_ops->vgpu_create(gvt, type);
711 if (IS_ERR_OR_NULL(vgpu)) {
712 ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
713 gvt_err("failed to create intel vgpu: %d\n", ret);
717 INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);
719 kvmgt_vdev(vgpu)->mdev = mdev;
720 mdev_set_drvdata(mdev, vgpu);
722 gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
723 dev_name(mdev_dev(mdev)));
730 static int intel_vgpu_remove(struct mdev_device *mdev)
732 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
734 if (handle_valid(vgpu->handle))
737 intel_gvt_ops->vgpu_destroy(vgpu);
741 static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
742 unsigned long action, void *data)
744 struct kvmgt_vdev *vdev = container_of(nb,
747 struct intel_vgpu *vgpu = vdev->vgpu;
749 if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
750 struct vfio_iommu_type1_dma_unmap *unmap = data;
751 struct gvt_dma *entry;
752 unsigned long iov_pfn, end_iov_pfn;
754 iov_pfn = unmap->iova >> PAGE_SHIFT;
755 end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
757 mutex_lock(&vdev->cache_lock);
758 for (; iov_pfn < end_iov_pfn; iov_pfn++) {
759 entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
763 gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
765 __gvt_cache_remove_entry(vgpu, entry);
767 mutex_unlock(&vdev->cache_lock);
773 static int intel_vgpu_group_notifier(struct notifier_block *nb,
774 unsigned long action, void *data)
776 struct kvmgt_vdev *vdev = container_of(nb,
780 /* the only action we care about */
781 if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
785 schedule_work(&vdev->release_work);
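/*
 * Open path: register the IOMMU unmap and group notifiers, take an
 * external reference on the VFIO group, pin the module and finally set up
 * the KVM guest state via kvmgt_guest_init().
 */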
791 static int intel_vgpu_open(struct mdev_device *mdev)
793 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
794 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
795 unsigned long events;
797 struct vfio_group *vfio_group;
799 vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
800 vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
802 events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
803 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
804 &vdev->iommu_notifier);
806 gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
811 events = VFIO_GROUP_NOTIFY_SET_KVM;
812 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
813 &vdev->group_notifier);
815 gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
820 vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
821 if (IS_ERR_OR_NULL(vfio_group)) {
822 ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
823 gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
826 vdev->vfio_group = vfio_group;
828 /* Take a module reference, as the mdev core doesn't take
829 * one on behalf of the vendor driver.
831 if (!try_module_get(THIS_MODULE)) {
836 ret = kvmgt_guest_init(mdev);
840 intel_gvt_ops->vgpu_activate(vgpu);
842 atomic_set(&vdev->released, 0);
846 vfio_group_put_external_user(vdev->vfio_group);
847 vdev->vfio_group = NULL;
850 vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
851 &vdev->group_notifier);
854 vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
855 &vdev->iommu_notifier);
860 static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
862 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
863 struct eventfd_ctx *trigger;
865 trigger = vdev->msi_trigger;
867 eventfd_ctx_put(trigger);
868 vdev->msi_trigger = NULL;
872 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
874 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
875 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
876 struct kvmgt_guest_info *info;
879 if (!handle_valid(vgpu->handle))
882 if (atomic_cmpxchg(&vdev->released, 0, 1))
885 intel_gvt_ops->vgpu_release(vgpu);
887 ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
888 &vdev->iommu_notifier);
889 drm_WARN(&i915->drm, ret,
890 "vfio_unregister_notifier for iommu failed: %d\n", ret);
892 ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY,
893 &vdev->group_notifier);
894 drm_WARN(&i915->drm, ret,
895 "vfio_unregister_notifier for group failed: %d\n", ret);
897 /* release the module reference taken at open */
898 module_put(THIS_MODULE);
900 info = (struct kvmgt_guest_info *)vgpu->handle;
901 kvmgt_guest_exit(info);
903 intel_vgpu_release_msi_eventfd_ctx(vgpu);
904 vfio_group_put_external_user(vdev->vfio_group);
910 static void intel_vgpu_release(struct mdev_device *mdev)
912 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
914 __intel_vgpu_release(vgpu);
917 static void intel_vgpu_release_work(struct work_struct *work)
919 struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev,
922 __intel_vgpu_release(vdev->vgpu);
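/*
 * Reconstruct the guest-programmed BAR base from the virtual config space;
 * 64-bit memory BARs combine the low dword with the following dword for
 * the upper 32 bits.
 */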
925 static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
927 u32 start_lo, start_hi;
930 start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
931 PCI_BASE_ADDRESS_MEM_MASK;
932 mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
933 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
936 case PCI_BASE_ADDRESS_MEM_TYPE_64:
937 start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
940 case PCI_BASE_ADDRESS_MEM_TYPE_32:
941 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
942 /* 1M mem BAR treated as 32-bit BAR */
944 /* mem unknown type treated as 32-bit BAR */
949 return ((u64)start_hi << 32) | start_lo;
952 static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
953 void *buf, unsigned int count, bool is_write)
955 u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
959 ret = intel_gvt_ops->emulate_mmio_write(vgpu,
960 bar_start + off, buf, count);
962 ret = intel_gvt_ops->emulate_mmio_read(vgpu,
963 bar_start + off, buf, count);
967 static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
969 return off >= vgpu_aperture_offset(vgpu) &&
970 off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
973 static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
974 void *buf, unsigned long count, bool is_write)
976 void __iomem *aperture_va;
978 if (!intel_vgpu_in_aperture(vgpu, off) ||
979 !intel_vgpu_in_aperture(vgpu, off + count)) {
980 gvt_vgpu_err("Invalid aperture offset %llu\n", off);
984 aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
985 ALIGN_DOWN(off, PAGE_SIZE),
986 count + offset_in_page(off));
991 memcpy_toio(aperture_va + offset_in_page(off), buf, count);
993 memcpy_fromio(buf, aperture_va + offset_in_page(off), count);
995 io_mapping_unmap(aperture_va);
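/*
 * Central read/write dispatcher: decode the region index from the file
 * offset, then route config space accesses to the cfg emulation, BAR0 to
 * MMIO emulation, BAR2 to the aperture, and anything above
 * VFIO_PCI_NUM_REGIONS to the device-specific region ops.
 */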
1000 static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
1001 size_t count, loff_t *ppos, bool is_write)
1003 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1004 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
1005 unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
1006 u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
1010 if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) {
1011 gvt_vgpu_err("invalid index: %u\n", index);
1016 case VFIO_PCI_CONFIG_REGION_INDEX:
1018 ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
1021 ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
1024 case VFIO_PCI_BAR0_REGION_INDEX:
1025 ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
1026 buf, count, is_write);
1028 case VFIO_PCI_BAR2_REGION_INDEX:
1029 ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
1031 case VFIO_PCI_BAR1_REGION_INDEX:
1032 case VFIO_PCI_BAR3_REGION_INDEX:
1033 case VFIO_PCI_BAR4_REGION_INDEX:
1034 case VFIO_PCI_BAR5_REGION_INDEX:
1035 case VFIO_PCI_VGA_REGION_INDEX:
1036 case VFIO_PCI_ROM_REGION_INDEX:
1039 if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
1042 index -= VFIO_PCI_NUM_REGIONS;
1043 return vdev->region[index].ops->rw(vgpu, buf, count,
1047 return ret == 0 ? count : ret;
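/*
 * gtt_entry() decides whether an access targets a GGTT entry within the
 * MMIO BAR; only such accesses are performed as a single 8-byte
 * transaction in the read/write paths below.
 */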
1050 static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
1052 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1053 unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
1054 struct intel_gvt *gvt = vgpu->gvt;
1057 /* Only allow MMIO GGTT entry access */
1058 if (index != PCI_BASE_ADDRESS_0)
1061 offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
1062 intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
1064 return (offset >= gvt->device_info.gtt_start_offset &&
1065 offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
1069 static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
1070 size_t count, loff_t *ppos)
1072 unsigned int done = 0;
1078 /* 8-byte reads are only supported for GGTT entries */
1079 if (count >= 8 && !(*ppos % 8) &&
1080 gtt_entry(mdev, ppos)) {
1083 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
1088 if (copy_to_user(buf, &val, sizeof(val)))
1092 } else if (count >= 4 && !(*ppos % 4)) {
1095 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
1100 if (copy_to_user(buf, &val, sizeof(val)))
1104 } else if (count >= 2 && !(*ppos % 2)) {
1107 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
1112 if (copy_to_user(buf, &val, sizeof(val)))
1119 ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
1124 if (copy_to_user(buf, &val, sizeof(val)))
1142 static ssize_t intel_vgpu_write(struct mdev_device *mdev,
1143 const char __user *buf,
1144 size_t count, loff_t *ppos)
1146 unsigned int done = 0;
1152 /* 8-byte writes are only supported for GGTT entries */
1153 if (count >= 8 && !(*ppos % 8) &&
1154 gtt_entry(mdev, ppos)) {
1157 if (copy_from_user(&val, buf, sizeof(val)))
1160 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
1166 } else if (count >= 4 && !(*ppos % 4)) {
1169 if (copy_from_user(&val, buf, sizeof(val)))
1172 ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
1178 } else if (count >= 2 && !(*ppos % 2)) {
1181 if (copy_from_user(&val, buf, sizeof(val)))
1184 ret = intel_vgpu_rw(mdev, (char *)&val,
1185 sizeof(val), ppos, true);
1193 if (copy_from_user(&val, buf, sizeof(val)))
1196 ret = intel_vgpu_rw(mdev, &val, sizeof(val),
1215 static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
1219 unsigned long req_size, pgoff, req_start;
1221 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1223 index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
1224 if (index >= VFIO_PCI_ROM_REGION_INDEX)
1227 if (vma->vm_end < vma->vm_start)
1229 if ((vma->vm_flags & VM_SHARED) == 0)
1231 if (index != VFIO_PCI_BAR2_REGION_INDEX)
1234 pg_prot = vma->vm_page_prot;
1235 virtaddr = vma->vm_start;
1236 req_size = vma->vm_end - vma->vm_start;
1237 pgoff = vma->vm_pgoff &
1238 ((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
1239 req_start = pgoff << PAGE_SHIFT;
1241 if (!intel_vgpu_in_aperture(vgpu, req_start))
1243 if (req_start + req_size >
1244 vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
1247 pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;
1249 return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
1252 static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
1254 if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
1260 static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
1261 unsigned int index, unsigned int start,
1262 unsigned int count, u32 flags,
1268 static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
1269 unsigned int index, unsigned int start,
1270 unsigned int count, u32 flags, void *data)
1275 static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
1276 unsigned int index, unsigned int start, unsigned int count,
1277 u32 flags, void *data)
1282 static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
1283 unsigned int index, unsigned int start, unsigned int count,
1284 u32 flags, void *data)
1286 struct eventfd_ctx *trigger;
1288 if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
1289 int fd = *(int *)data;
1291 trigger = eventfd_ctx_fdget(fd);
1292 if (IS_ERR(trigger)) {
1293 gvt_vgpu_err("eventfd_ctx_fdget failed\n");
1294 return PTR_ERR(trigger);
1296 kvmgt_vdev(vgpu)->msi_trigger = trigger;
1297 } else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
1298 intel_vgpu_release_msi_eventfd_ctx(vgpu);
1303 static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
1304 unsigned int index, unsigned int start, unsigned int count,
1307 int (*func)(struct intel_vgpu *vgpu, unsigned int index,
1308 unsigned int start, unsigned int count, u32 flags,
1312 case VFIO_PCI_INTX_IRQ_INDEX:
1313 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
1314 case VFIO_IRQ_SET_ACTION_MASK:
1315 func = intel_vgpu_set_intx_mask;
1317 case VFIO_IRQ_SET_ACTION_UNMASK:
1318 func = intel_vgpu_set_intx_unmask;
1320 case VFIO_IRQ_SET_ACTION_TRIGGER:
1321 func = intel_vgpu_set_intx_trigger;
1325 case VFIO_PCI_MSI_IRQ_INDEX:
1326 switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
1327 case VFIO_IRQ_SET_ACTION_MASK:
1328 case VFIO_IRQ_SET_ACTION_UNMASK:
1329 /* XXX Need masking support exported */
1331 case VFIO_IRQ_SET_ACTION_TRIGGER:
1332 func = intel_vgpu_set_msi_trigger;
1341 return func(vgpu, index, start, count, flags, data);
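/*
 * Mediated device ioctl handler: implements VFIO_DEVICE_GET_INFO,
 * GET_REGION_INFO (including the sparse-mmap capability for the BAR2
 * aperture), GET_IRQ_INFO, SET_IRQS, RESET and the GFX plane/dmabuf
 * queries used by the dma-buf display path.
 */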
1344 static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
1347 struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1348 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
1349 unsigned long minsz;
1351 gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
1353 if (cmd == VFIO_DEVICE_GET_INFO) {
1354 struct vfio_device_info info;
1356 minsz = offsetofend(struct vfio_device_info, num_irqs);
1358 if (copy_from_user(&info, (void __user *)arg, minsz))
1361 if (info.argsz < minsz)
1364 info.flags = VFIO_DEVICE_FLAGS_PCI;
1365 info.flags |= VFIO_DEVICE_FLAGS_RESET;
1366 info.num_regions = VFIO_PCI_NUM_REGIONS +
1368 info.num_irqs = VFIO_PCI_NUM_IRQS;
1370 return copy_to_user((void __user *)arg, &info, minsz) ?
1373 } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
1374 struct vfio_region_info info;
1375 struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
1378 struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
1382 minsz = offsetofend(struct vfio_region_info, offset);
1384 if (copy_from_user(&info, (void __user *)arg, minsz))
1387 if (info.argsz < minsz)
1390 switch (info.index) {
1391 case VFIO_PCI_CONFIG_REGION_INDEX:
1392 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1393 info.size = vgpu->gvt->device_info.cfg_space_size;
1394 info.flags = VFIO_REGION_INFO_FLAG_READ |
1395 VFIO_REGION_INFO_FLAG_WRITE;
1397 case VFIO_PCI_BAR0_REGION_INDEX:
1398 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1399 info.size = vgpu->cfg_space.bar[info.index].size;
1405 info.flags = VFIO_REGION_INFO_FLAG_READ |
1406 VFIO_REGION_INFO_FLAG_WRITE;
1408 case VFIO_PCI_BAR1_REGION_INDEX:
1409 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1413 case VFIO_PCI_BAR2_REGION_INDEX:
1414 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1415 info.flags = VFIO_REGION_INFO_FLAG_CAPS |
1416 VFIO_REGION_INFO_FLAG_MMAP |
1417 VFIO_REGION_INFO_FLAG_READ |
1418 VFIO_REGION_INFO_FLAG_WRITE;
1419 info.size = gvt_aperture_sz(vgpu->gvt);
1421 sparse = kzalloc(struct_size(sparse, areas, nr_areas),
1426 sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
1427 sparse->header.version = 1;
1428 sparse->nr_areas = nr_areas;
1429 cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
1430 sparse->areas[0].offset =
1431 PAGE_ALIGN(vgpu_aperture_offset(vgpu));
1432 sparse->areas[0].size = vgpu_aperture_sz(vgpu);
1435 case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
1436 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1440 gvt_dbg_core("get region info bar:%d\n", info.index);
1443 case VFIO_PCI_ROM_REGION_INDEX:
1444 case VFIO_PCI_VGA_REGION_INDEX:
1445 info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1449 gvt_dbg_core("get region info index:%d\n", info.index);
1453 struct vfio_region_info_cap_type cap_type = {
1454 .header.id = VFIO_REGION_INFO_CAP_TYPE,
1455 .header.version = 1 };
1457 if (info.index >= VFIO_PCI_NUM_REGIONS +
1461 array_index_nospec(info.index,
1462 VFIO_PCI_NUM_REGIONS +
1465 i = info.index - VFIO_PCI_NUM_REGIONS;
1468 VFIO_PCI_INDEX_TO_OFFSET(info.index);
1469 info.size = vdev->region[i].size;
1470 info.flags = vdev->region[i].flags;
1472 cap_type.type = vdev->region[i].type;
1473 cap_type.subtype = vdev->region[i].subtype;
1475 ret = vfio_info_add_capability(&caps,
1483 if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
1484 switch (cap_type_id) {
1485 case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
1486 ret = vfio_info_add_capability(&caps,
1488 struct_size(sparse, areas,
1502 info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
1503 if (info.argsz < sizeof(info) + caps.size) {
1504 info.argsz = sizeof(info) + caps.size;
1505 info.cap_offset = 0;
1507 vfio_info_cap_shift(&caps, sizeof(info));
1508 if (copy_to_user((void __user *)arg +
1509 sizeof(info), caps.buf,
1515 info.cap_offset = sizeof(info);
1522 return copy_to_user((void __user *)arg, &info, minsz) ?
1524 } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
1525 struct vfio_irq_info info;
1527 minsz = offsetofend(struct vfio_irq_info, count);
1529 if (copy_from_user(&info, (void __user *)arg, minsz))
1532 if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
1535 switch (info.index) {
1536 case VFIO_PCI_INTX_IRQ_INDEX:
1537 case VFIO_PCI_MSI_IRQ_INDEX:
1543 info.flags = VFIO_IRQ_INFO_EVENTFD;
1545 info.count = intel_vgpu_get_irq_count(vgpu, info.index);
1547 if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
1548 info.flags |= (VFIO_IRQ_INFO_MASKABLE |
1549 VFIO_IRQ_INFO_AUTOMASKED);
1551 info.flags |= VFIO_IRQ_INFO_NORESIZE;
1553 return copy_to_user((void __user *)arg, &info, minsz) ?
1555 } else if (cmd == VFIO_DEVICE_SET_IRQS) {
1556 struct vfio_irq_set hdr;
1559 size_t data_size = 0;
1561 minsz = offsetofend(struct vfio_irq_set, count);
1563 if (copy_from_user(&hdr, (void __user *)arg, minsz))
1566 if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
1567 int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
1569 ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
1570 VFIO_PCI_NUM_IRQS, &data_size);
1572 gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n");
1576 data = memdup_user((void __user *)(arg + minsz),
1579 return PTR_ERR(data);
1583 ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
1584 hdr.start, hdr.count, data);
1588 } else if (cmd == VFIO_DEVICE_RESET) {
1589 intel_gvt_ops->vgpu_reset(vgpu);
1591 } else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
1592 struct vfio_device_gfx_plane_info dmabuf;
1595 minsz = offsetofend(struct vfio_device_gfx_plane_info,
1597 if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
1599 if (dmabuf.argsz < minsz)
1602 ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
1606 return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
1608 } else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
1612 if (get_user(dmabuf_id, (__u32 __user *)arg))
1615 dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
1624 vgpu_id_show(struct device *dev, struct device_attribute *attr,
1627 struct mdev_device *mdev = mdev_from_dev(dev);
1630 struct intel_vgpu *vgpu = (struct intel_vgpu *)
1631 mdev_get_drvdata(mdev);
1632 return sprintf(buf, "%d\n", vgpu->id);
1634 return sprintf(buf, "\n");
1637 static DEVICE_ATTR_RO(vgpu_id);
1639 static struct attribute *intel_vgpu_attrs[] = {
1640 &dev_attr_vgpu_id.attr,
1644 static const struct attribute_group intel_vgpu_group = {
1645 .name = "intel_vgpu",
1646 .attrs = intel_vgpu_attrs,
1649 static const struct attribute_group *intel_vgpu_groups[] = {
1654 static struct mdev_parent_ops intel_vgpu_ops = {
1655 .mdev_attr_groups = intel_vgpu_groups,
1656 .create = intel_vgpu_create,
1657 .remove = intel_vgpu_remove,
1659 .open = intel_vgpu_open,
1660 .release = intel_vgpu_release,
1662 .read = intel_vgpu_read,
1663 .write = intel_vgpu_write,
1664 .mmap = intel_vgpu_mmap,
1665 .ioctl = intel_vgpu_ioctl,
1668 static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
1670 struct attribute_group **kvm_vgpu_type_groups;
1672 intel_gvt_ops = ops;
1673 if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
1675 intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
1677 return mdev_register_device(dev, &intel_vgpu_ops);
1680 static void kvmgt_host_exit(struct device *dev)
1682 mdev_unregister_device(dev);
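/*
 * Enable write tracking for a guest page: look up the memslot under SRCU,
 * register the gfn with KVM's page-track machinery under mmu_lock, and
 * remember it in the local protect table.
 */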
1685 static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
1687 struct kvmgt_guest_info *info;
1689 struct kvm_memory_slot *slot;
1692 if (!handle_valid(handle))
1695 info = (struct kvmgt_guest_info *)handle;
1698 idx = srcu_read_lock(&kvm->srcu);
1699 slot = gfn_to_memslot(kvm, gfn);
1701 srcu_read_unlock(&kvm->srcu, idx);
1705 write_lock(&kvm->mmu_lock);
1707 if (kvmgt_gfn_is_write_protected(info, gfn))
1710 kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1711 kvmgt_protect_table_add(info, gfn);
1714 write_unlock(&kvm->mmu_lock);
1715 srcu_read_unlock(&kvm->srcu, idx);
1719 static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
1721 struct kvmgt_guest_info *info;
1723 struct kvm_memory_slot *slot;
1726 if (!handle_valid(handle))
1729 info = (struct kvmgt_guest_info *)handle;
1732 idx = srcu_read_lock(&kvm->srcu);
1733 slot = gfn_to_memslot(kvm, gfn);
1735 srcu_read_unlock(&kvm->srcu, idx);
1739 write_lock(&kvm->mmu_lock);
1741 if (!kvmgt_gfn_is_write_protected(info, gfn))
1744 kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1745 kvmgt_protect_table_del(info, gfn);
1748 write_unlock(&kvm->mmu_lock);
1749 srcu_read_unlock(&kvm->srcu, idx);
1753 static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1754 const u8 *val, int len,
1755 struct kvm_page_track_notifier_node *node)
1757 struct kvmgt_guest_info *info = container_of(node,
1758 struct kvmgt_guest_info, track_node);
1760 if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
1761 intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
1765 static void kvmgt_page_track_flush_slot(struct kvm *kvm,
1766 struct kvm_memory_slot *slot,
1767 struct kvm_page_track_notifier_node *node)
1771 struct kvmgt_guest_info *info = container_of(node,
1772 struct kvmgt_guest_info, track_node);
1774 write_lock(&kvm->mmu_lock);
1775 for (i = 0; i < slot->npages; i++) {
1776 gfn = slot->base_gfn + i;
1777 if (kvmgt_gfn_is_write_protected(info, gfn)) {
1778 kvm_slot_page_track_remove_page(kvm, slot, gfn,
1779 KVM_PAGE_TRACK_WRITE);
1780 kvmgt_protect_table_del(info, gfn);
1783 write_unlock(&kvm->mmu_lock);
1786 static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
1788 struct intel_vgpu *itr;
1789 struct kvmgt_guest_info *info;
1793 mutex_lock(&vgpu->gvt->lock);
1794 for_each_active_vgpu(vgpu->gvt, itr, id) {
1795 if (!handle_valid(itr->handle))
1798 info = (struct kvmgt_guest_info *)itr->handle;
1799 if (kvm && kvm == info->kvm) {
1805 mutex_unlock(&vgpu->gvt->lock);
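/*
 * Guest init: require a KVM instance bound to the caller's mm, reject a
 * KVM that already drives another vgpu, then allocate the
 * kvmgt_guest_info, register the page-track notifier and expose the DMA
 * cache size in debugfs.
 */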
1809 static int kvmgt_guest_init(struct mdev_device *mdev)
1811 struct kvmgt_guest_info *info;
1812 struct intel_vgpu *vgpu;
1813 struct kvmgt_vdev *vdev;
1816 vgpu = mdev_get_drvdata(mdev);
1817 if (handle_valid(vgpu->handle))
1820 vdev = kvmgt_vdev(vgpu);
1822 if (!kvm || kvm->mm != current->mm) {
1823 gvt_vgpu_err("KVM is required to use Intel vGPU\n");
1827 if (__kvmgt_vgpu_exist(vgpu, kvm))
1830 info = vzalloc(sizeof(struct kvmgt_guest_info));
1834 vgpu->handle = (unsigned long)info;
1837 kvm_get_kvm(info->kvm);
1839 kvmgt_protect_table_init(info);
1840 gvt_cache_init(vgpu);
1842 info->track_node.track_write = kvmgt_page_track_write;
1843 info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
1844 kvm_page_track_register_notifier(kvm, &info->track_node);
1846 info->debugfs_cache_entries = debugfs_create_ulong(
1847 "kvmgt_nr_cache_entries",
1848 0444, vgpu->debugfs,
1849 &vdev->nr_cache_entries);
1853 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
1855 debugfs_remove(info->debugfs_cache_entries);
1857 kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
1858 kvm_put_kvm(info->kvm);
1859 kvmgt_protect_table_destroy(info);
1860 gvt_cache_destroy(info->vgpu);
1866 static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle)
1868 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
1870 vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL);
1875 kvmgt_vdev(vgpu)->vgpu = vgpu;
1880 static void kvmgt_detach_vgpu(void *p_vgpu)
1883 struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
1884 struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
1889 for (i = 0; i < vdev->num_regions; i++)
1890 if (vdev->region[i].ops->release)
1891 vdev->region[i].ops->release(vgpu,
1893 vdev->num_regions = 0;
1894 kfree(vdev->region);
1895 vdev->region = NULL;
1900 static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
1902 struct kvmgt_guest_info *info;
1903 struct intel_vgpu *vgpu;
1904 struct kvmgt_vdev *vdev;
1906 if (!handle_valid(handle))
1909 info = (struct kvmgt_guest_info *)handle;
1911 vdev = kvmgt_vdev(vgpu);
1914 * When the guest powers off, msi_trigger is set to NULL, but the
1915 * vgpu's config space and MMIO registers are not restored to their
1916 * defaults. If this vgpu is reused by the next VM, its pipes may
1917 * still be enabled, so once the vgpu becomes active it can receive
1918 * vblank interrupt requests. msi_trigger remains NULL until the
1919 * guest enables MSI, so in that case return success without
1920 * injecting an interrupt into the guest.
1922 if (vdev->msi_trigger == NULL)
1925 if (eventfd_signal(vdev->msi_trigger, 1) == 1)
1931 static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1933 struct kvmgt_guest_info *info;
1936 if (!handle_valid(handle))
1937 return INTEL_GVT_INVALID_ADDR;
1939 info = (struct kvmgt_guest_info *)handle;
1941 pfn = gfn_to_pfn(info->kvm, gfn);
1942 if (is_error_noslot_pfn(pfn))
1943 return INTEL_GVT_INVALID_ADDR;
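/*
 * Map a guest page for DMA with caching: a hit on the gfn cache just takes
 * a kref and returns the cached dma_addr; a size mismatch forces an
 * unmap/remap; a miss pins the page, maps it and inserts a new cache
 * entry.
 */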
1948 static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
1949 unsigned long size, dma_addr_t *dma_addr)
1951 struct intel_vgpu *vgpu;
1952 struct kvmgt_vdev *vdev;
1953 struct gvt_dma *entry;
1956 if (!handle_valid(handle))
1959 vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
1960 vdev = kvmgt_vdev(vgpu);
1962 mutex_lock(&vdev->cache_lock);
1964 entry = __gvt_cache_find_gfn(vgpu, gfn);
1966 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1970 ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
1973 } else if (entry->size != size) {
1974 /* the same gfn with different size: unmap and re-map */
1975 gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
1976 __gvt_cache_remove_entry(vgpu, entry);
1978 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1982 ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
1986 kref_get(&entry->ref);
1987 *dma_addr = entry->dma_addr;
1990 mutex_unlock(&vdev->cache_lock);
1994 gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
1996 mutex_unlock(&vdev->cache_lock);
2000 static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
2002 struct kvmgt_guest_info *info;
2003 struct kvmgt_vdev *vdev;
2004 struct gvt_dma *entry;
2007 if (!handle_valid(handle))
2010 info = (struct kvmgt_guest_info *)handle;
2011 vdev = kvmgt_vdev(info->vgpu);
2013 mutex_lock(&vdev->cache_lock);
2014 entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
2016 kref_get(&entry->ref);
2019 mutex_unlock(&vdev->cache_lock);
2024 static void __gvt_dma_release(struct kref *ref)
2026 struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
2028 gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
2030 __gvt_cache_remove_entry(entry->vgpu, entry);
2033 static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
2035 struct intel_vgpu *vgpu;
2036 struct kvmgt_vdev *vdev;
2037 struct gvt_dma *entry;
2039 if (!handle_valid(handle))
2042 vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
2043 vdev = kvmgt_vdev(vgpu);
2045 mutex_lock(&vdev->cache_lock);
2046 entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
2048 kref_put(&entry->ref, __gvt_dma_release);
2049 mutex_unlock(&vdev->cache_lock);
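/*
 * Guest physical memory is accessed through vfio_dma_rw() on the VFIO
 * group, so reads and writes follow the IOMMU mappings set up for the
 * guest rather than going through KVM.
 */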
2052 static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
2053 void *buf, unsigned long len, bool write)
2055 struct kvmgt_guest_info *info;
2057 if (!handle_valid(handle))
2060 info = (struct kvmgt_guest_info *)handle;
2062 return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
2063 gpa, buf, len, write);
2066 static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
2067 void *buf, unsigned long len)
2069 return kvmgt_rw_gpa(handle, gpa, buf, len, false);
2072 static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
2073 void *buf, unsigned long len)
2075 return kvmgt_rw_gpa(handle, gpa, buf, len, true);
2078 static unsigned long kvmgt_virt_to_pfn(void *addr)
2080 return PFN_DOWN(__pa(addr));
2083 static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
2085 struct kvmgt_guest_info *info;
2090 if (!handle_valid(handle))
2093 info = (struct kvmgt_guest_info *)handle;
2096 idx = srcu_read_lock(&kvm->srcu);
2097 ret = kvm_is_visible_gfn(kvm, gfn);
2098 srcu_read_unlock(&kvm->srcu, idx);
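/*
 * MPT (mediated pass-through) operations exported to the GVT-g core;
 * registered as the KVM hypervisor backend in kvmgt_init() below.
 */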
2103 static const struct intel_gvt_mpt kvmgt_mpt = {
2104 .type = INTEL_GVT_HYPERVISOR_KVM,
2105 .host_init = kvmgt_host_init,
2106 .host_exit = kvmgt_host_exit,
2107 .attach_vgpu = kvmgt_attach_vgpu,
2108 .detach_vgpu = kvmgt_detach_vgpu,
2109 .inject_msi = kvmgt_inject_msi,
2110 .from_virt_to_mfn = kvmgt_virt_to_pfn,
2111 .enable_page_track = kvmgt_page_track_add,
2112 .disable_page_track = kvmgt_page_track_remove,
2113 .read_gpa = kvmgt_read_gpa,
2114 .write_gpa = kvmgt_write_gpa,
2115 .gfn_to_mfn = kvmgt_gfn_to_pfn,
2116 .dma_map_guest_page = kvmgt_dma_map_guest_page,
2117 .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
2118 .dma_pin_guest_page = kvmgt_dma_pin_guest_page,
2119 .set_opregion = kvmgt_set_opregion,
2120 .set_edid = kvmgt_set_edid,
2121 .get_vfio_device = kvmgt_get_vfio_device,
2122 .put_vfio_device = kvmgt_put_vfio_device,
2123 .is_valid_gfn = kvmgt_is_valid_gfn,
2126 static int __init kvmgt_init(void)
2128 if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
2133 static void __exit kvmgt_exit(void)
2135 intel_gvt_unregister_hypervisor();
2138 module_init(kvmgt_init);
2139 module_exit(kvmgt_exit);
2141 MODULE_LICENSE("GPL and additional rights");
2142 MODULE_AUTHOR("Intel Corporation");