drm/i915/gvt: remove unused vblank_done completion
drivers/gpu/drm/i915/gvt/kvmgt.c
/*
 * KVMGT - the implementation of Intel mediated pass-through framework for KVM
 *
 * Copyright(c) 2014-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Jike Song <jike.song@intel.com>
 *    Xiaoguang Chen <xiaoguang.chen@intel.com>
 */

#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/sched/mm.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>
#include <linux/uuid.h>
#include <linux/kvm_host.h>
#include <linux/vfio.h>
#include <linux/mdev.h>
#include <linux/debugfs.h>

#include <linux/nospec.h>

#include "i915_drv.h"
#include "gvt.h"

static const struct intel_gvt_ops *intel_gvt_ops;

/* helper macros copied from vfio-pci */
#define VFIO_PCI_OFFSET_SHIFT   40
#define VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK    (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)

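/*
 * Worked example of the encoding above: an access at
 * *ppos = VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_CONFIG_REGION_INDEX) | 0x10
 * targets offset 0x10 of the emulated PCI config space;
 * VFIO_PCI_OFFSET_TO_INDEX(*ppos) recovers the region index and
 * (*ppos & VFIO_PCI_OFFSET_MASK) the in-region offset.
 */
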
#define EDID_BLOB_OFFSET (PAGE_SIZE/2)

#define OPREGION_SIGNATURE "IntelGraphicsMem"

struct vfio_region;
struct intel_vgpu_regops {
	size_t (*rw)(struct intel_vgpu *vgpu, char *buf,
			size_t count, loff_t *ppos, bool iswrite);
	void (*release)(struct intel_vgpu *vgpu,
			struct vfio_region *region);
};

struct vfio_region {
	u32				type;
	u32				subtype;
	size_t				size;
	u32				flags;
	const struct intel_vgpu_regops	*ops;
	void				*data;
};

struct vfio_edid_region {
	struct vfio_region_gfx_edid vfio_edid_regs;
	void *edid_blob;
};

struct kvmgt_pgfn {
	gfn_t gfn;
	struct hlist_node hnode;
};

struct kvmgt_guest_info {
	struct kvm *kvm;
	struct intel_vgpu *vgpu;
	struct kvm_page_track_notifier_node track_node;
#define NR_BKT (1 << 18)
	struct hlist_head ptable[NR_BKT];
#undef NR_BKT
	struct dentry *debugfs_cache_entries;
};

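/*
 * A gvt_dma entry records one pinned-and-mapped guest memory range.  Each
 * entry is linked into two rb-trees so it can be looked up either by guest
 * page frame number (gfn_node) or by the DMA address handed to hardware
 * (dma_addr_node).
 */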
struct gvt_dma {
	struct intel_vgpu *vgpu;
	struct rb_node gfn_node;
	struct rb_node dma_addr_node;
	gfn_t gfn;
	dma_addr_t dma_addr;
	unsigned long size;
	struct kref ref;
};

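/*
 * vgpu->handle holds the address of the guest's kvmgt_guest_info once the
 * guest is initialized, so any value with bits set above the low byte is
 * treated as a valid handle.
 */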
static inline bool handle_valid(unsigned long handle)
{
	return !!(handle & ~0xff);
}

static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);

static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size)
{
	int total_pages;
	int npage;
	int ret;

	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;

	for (npage = 0; npage < total_pages; npage++) {
		unsigned long cur_gfn = gfn + npage;

		ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
		WARN_ON(ret != 1);
	}
}

/* Pin a normal or compound guest page for dma. */
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
		unsigned long size, struct page **page)
{
	unsigned long base_pfn = 0;
	int total_pages;
	int npage;
	int ret;

	total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
	/*
	 * We pin the pages one-by-one to avoid allocating a big array
	 * on stack to hold pfns.
	 */
	for (npage = 0; npage < total_pages; npage++) {
		unsigned long cur_gfn = gfn + npage;
		unsigned long pfn;

		ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
				     IOMMU_READ | IOMMU_WRITE, &pfn);
		if (ret != 1) {
			gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
				     cur_gfn, ret);
			goto err;
		}

		if (!pfn_valid(pfn)) {
			gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
			npage++;
			ret = -EFAULT;
			goto err;
		}

		if (npage == 0)
			base_pfn = pfn;
		else if (base_pfn + npage != pfn) {
			gvt_vgpu_err("The pages are not contiguous\n");
			ret = -EINVAL;
			npage++;
			goto err;
		}
	}

	*page = pfn_to_page(base_pfn);
	return 0;
err:
	gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
	return ret;
}

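/*
 * Pin the guest range starting at @gfn and create a streaming DMA mapping
 * for it through the physical GPU's PCI device.  On failure the pages are
 * unpinned again, so the caller sees no side effects.
 */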
static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t *dma_addr, unsigned long size)
{
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	struct page *page = NULL;
	int ret;

	ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
	if (ret)
		return ret;

	/* Setup DMA mapping. */
	*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, *dma_addr)) {
		gvt_vgpu_err("DMA mapping failed for pfn 0x%lx\n",
			     page_to_pfn(page));
		gvt_unpin_guest_page(vgpu, gfn, size);
		return -ENOMEM;
	}

	return 0;
}

static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
		dma_addr_t dma_addr, unsigned long size)
{
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;

	dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
	gvt_unpin_guest_page(vgpu, gfn, size);
}

static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
		dma_addr_t dma_addr)
{
	struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			node = node->rb_left;
		else if (dma_addr > itr->dma_addr)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
	struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
	struct gvt_dma *itr;

	while (node) {
		itr = rb_entry(node, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			node = node->rb_left;
		else if (gfn > itr->gfn)
			node = node->rb_right;
		else
			return itr;
	}
	return NULL;
}

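/*
 * Allocate a gvt_dma entry and insert it into both lookup trees.  The two
 * trees must stay in sync: every entry is reachable both by gfn and by
 * dma_addr, so teardown paths can start from either key.
 */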
static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
		dma_addr_t dma_addr, unsigned long size)
{
	struct gvt_dma *new, *itr;
	struct rb_node **link, *parent = NULL;

	new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->vgpu = vgpu;
	new->gfn = gfn;
	new->dma_addr = dma_addr;
	new->size = size;
	kref_init(&new->ref);

	/* gfn_cache maps gfn to struct gvt_dma. */
	link = &vgpu->vdev.gfn_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, gfn_node);

		if (gfn < itr->gfn)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->gfn_node, parent, link);
	rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);

	/* dma_addr_cache maps dma addr to struct gvt_dma. */
	parent = NULL;
	link = &vgpu->vdev.dma_addr_cache.rb_node;
	while (*link) {
		parent = *link;
		itr = rb_entry(parent, struct gvt_dma, dma_addr_node);

		if (dma_addr < itr->dma_addr)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	rb_link_node(&new->dma_addr_node, parent, link);
	rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);

	vgpu->vdev.nr_cache_entries++;
	return 0;
}

static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
				struct gvt_dma *entry)
{
	rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
	rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);
	kfree(entry);
	vgpu->vdev.nr_cache_entries--;
}

static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
	struct gvt_dma *dma;
	struct rb_node *node = NULL;

	for (;;) {
		mutex_lock(&vgpu->vdev.cache_lock);
		node = rb_first(&vgpu->vdev.gfn_cache);
		if (!node) {
			mutex_unlock(&vgpu->vdev.cache_lock);
			break;
		}
		dma = rb_entry(node, struct gvt_dma, gfn_node);
		gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
		__gvt_cache_remove_entry(vgpu, dma);
		mutex_unlock(&vgpu->vdev.cache_lock);
	}
}

static void gvt_cache_init(struct intel_vgpu *vgpu)
{
	vgpu->vdev.gfn_cache = RB_ROOT;
	vgpu->vdev.dma_addr_cache = RB_ROOT;
	vgpu->vdev.nr_cache_entries = 0;
	mutex_init(&vgpu->vdev.cache_lock);
}

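/*
 * The ptable hash in kvmgt_guest_info records which guest page frames are
 * currently write-protected; it is keyed by gfn so membership can be
 * tested cheaply from the KVM page-track notifier path.
 */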
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
{
	hash_init(info->ptable);
}

static void kvmgt_protect_table_destroy(struct kvmgt_guest_info *info)
{
	struct kvmgt_pgfn *p;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(info->ptable, i, tmp, p, hnode) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static struct kvmgt_pgfn *
__kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p, *res = NULL;

	hash_for_each_possible(info->ptable, p, hnode, gfn) {
		if (gfn == p->gfn) {
			res = p;
			break;
		}
	}

	return res;
}

static bool kvmgt_gfn_is_write_protected(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	return !!p;
}

static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	if (kvmgt_gfn_is_write_protected(info, gfn))
		return;

	p = kzalloc(sizeof(struct kvmgt_pgfn), GFP_ATOMIC);
	if (WARN(!p, "gfn: 0x%llx\n", gfn))
		return;

	p->gfn = gfn;
	hash_add(info->ptable, &p->hnode, gfn);
}

static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
				gfn_t gfn)
{
	struct kvmgt_pgfn *p;

	p = __kvmgt_protect_table_find(info, gfn);
	if (p) {
		hash_del(&p->hnode);
		kfree(p);
	}
}

static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	void *base = vgpu->vdev.region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos >= vgpu->vdev.region[i].size || iswrite) {
		gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
		return -EINVAL;
	}
	count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
	memcpy(buf, base + pos, count);

	return count;
}

static void intel_vgpu_reg_release_opregion(struct intel_vgpu *vgpu,
		struct vfio_region *region)
{
}

static const struct intel_vgpu_regops intel_vgpu_regops_opregion = {
	.rw = intel_vgpu_reg_rw_opregion,
	.release = intel_vgpu_reg_release_opregion,
};

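/*
 * Emulate 4-byte accesses to the vfio_region_gfx_edid control registers at
 * the start of the EDID region.  A write to link_state validates the EDID
 * blob and forwards a hotplug event to the vGPU; edid_size may be changed
 * up to edid_max_size; all other registers are read-only.
 */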
static int handle_edid_regs(struct intel_vgpu *vgpu,
			struct vfio_edid_region *region, char *buf,
			size_t count, u16 offset, bool is_write)
{
	struct vfio_region_gfx_edid *regs = &region->vfio_edid_regs;
	unsigned int data;

	if (offset + count > sizeof(*regs))
		return -EINVAL;

	if (count != 4)
		return -EINVAL;

	if (is_write) {
		data = *((unsigned int *)buf);
		switch (offset) {
		case offsetof(struct vfio_region_gfx_edid, link_state):
			if (data == VFIO_DEVICE_GFX_LINK_STATE_UP) {
				if (!drm_edid_block_valid(
					(u8 *)region->edid_blob,
					0,
					true,
					NULL)) {
					gvt_vgpu_err("invalid EDID blob\n");
					return -EINVAL;
				}
				intel_gvt_ops->emulate_hotplug(vgpu, true);
			} else if (data == VFIO_DEVICE_GFX_LINK_STATE_DOWN) {
				intel_gvt_ops->emulate_hotplug(vgpu, false);
			} else {
				gvt_vgpu_err("invalid EDID link state %u\n",
					data);
				return -EINVAL;
			}
			regs->link_state = data;
			break;
		case offsetof(struct vfio_region_gfx_edid, edid_size):
			if (data > regs->edid_max_size) {
				gvt_vgpu_err("EDID size is bigger than %d!\n",
					regs->edid_max_size);
				return -EINVAL;
			}
			regs->edid_size = data;
			break;
		default:
			/* read-only regs */
			gvt_vgpu_err("write read-only EDID region at offset %d\n",
				offset);
			return -EPERM;
		}
	} else {
		memcpy(buf, (char *)regs + offset, count);
	}

	return count;
}

static int handle_edid_blob(struct vfio_edid_region *region, char *buf,
			size_t count, u16 offset, bool is_write)
{
	if (offset + count > region->vfio_edid_regs.edid_size)
		return -EINVAL;

	if (is_write)
		memcpy(region->edid_blob + offset, buf, count);
	else
		memcpy(buf, region->edid_blob + offset, count);

	return count;
}

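/*
 * The EDID region has two halves: the control registers live at offset 0
 * and the EDID blob itself starts at EDID_BLOB_OFFSET (half a page in).
 * Dispatch each access to the matching handler based on the in-region
 * offset.
 */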
static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
		size_t count, loff_t *ppos, bool iswrite)
{
	int ret;
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
			VFIO_PCI_NUM_REGIONS;
	struct vfio_edid_region *region =
		(struct vfio_edid_region *)vgpu->vdev.region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;

	if (pos < region->vfio_edid_regs.edid_offset) {
		ret = handle_edid_regs(vgpu, region, buf, count, pos, iswrite);
	} else {
		pos -= EDID_BLOB_OFFSET;
		ret = handle_edid_blob(region, buf, count, pos, iswrite);
	}

	if (ret < 0)
		gvt_vgpu_err("failed to access EDID region\n");

	return ret;
}

static void intel_vgpu_reg_release_edid(struct intel_vgpu *vgpu,
					struct vfio_region *region)
{
	kfree(region->data);
}

static const struct intel_vgpu_regops intel_vgpu_regops_edid = {
	.rw = intel_vgpu_reg_rw_edid,
	.release = intel_vgpu_reg_release_edid,
};

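/*
 * Append one device-specific region to the vGPU's region array.  The array
 * is grown with krealloc, so pointers into it must not be cached across
 * calls; these regions are indexed after the fixed VFIO_PCI_NUM_REGIONS.
 */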
static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
		unsigned int type, unsigned int subtype,
		const struct intel_vgpu_regops *ops,
		size_t size, u32 flags, void *data)
{
	struct vfio_region *region;

	region = krealloc(vgpu->vdev.region,
			(vgpu->vdev.num_regions + 1) * sizeof(*region),
			GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	vgpu->vdev.region = region;
	vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
	vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
	vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
	vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
	vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
	vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
	vgpu->vdev.num_regions++;
	return 0;
}

static int kvmgt_get_vfio_device(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;

	vgpu->vdev.vfio_device = vfio_device_get_from_dev(
		mdev_dev(vgpu->vdev.mdev));
	if (!vgpu->vdev.vfio_device) {
		gvt_vgpu_err("failed to get vfio device\n");
		return -ENODEV;
	}
	return 0;
}

static int kvmgt_set_opregion(void *p_vgpu)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	void *base;
	int ret;

	/* Each vGPU has its own OpRegion. The copy registered here only
	 * exposes the OpRegion to VFIO; VFIO creates a second copy later,
	 * and that second copy is the one the guest actually uses.
	 */
	base = vgpu_opregion(vgpu)->va;
	if (!base)
		return -ENOMEM;

	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
		memunmap(base);
		return -EINVAL;
	}

	ret = intel_vgpu_register_reg(vgpu,
			PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
			VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
			&intel_vgpu_regops_opregion, OPREGION_SIZE,
			VFIO_REGION_INFO_FLAG_READ, base);

	return ret;
}

static int kvmgt_set_edid(void *p_vgpu, int port_num)
{
	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
	struct vfio_edid_region *base;
	int ret;

	base = kzalloc(sizeof(*base), GFP_KERNEL);
	if (!base)
		return -ENOMEM;

	/* TODO: Add multi-port and EDID extension block support */
	base->vfio_edid_regs.edid_offset = EDID_BLOB_OFFSET;
	base->vfio_edid_regs.edid_max_size = EDID_SIZE;
	base->vfio_edid_regs.edid_size = EDID_SIZE;
	base->vfio_edid_regs.max_xres = vgpu_edid_xres(port->id);
	base->vfio_edid_regs.max_yres = vgpu_edid_yres(port->id);
	base->edid_blob = port->edid->edid_block;

	ret = intel_vgpu_register_reg(vgpu,
			VFIO_REGION_TYPE_GFX,
			VFIO_REGION_SUBTYPE_GFX_EDID,
			&intel_vgpu_regops_edid, EDID_SIZE,
			VFIO_REGION_INFO_FLAG_READ |
			VFIO_REGION_INFO_FLAG_WRITE |
			VFIO_REGION_INFO_FLAG_CAPS, base);

	return ret;
}

static void kvmgt_put_vfio_device(void *vgpu)
{
	if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
		return;

	vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
}

static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = NULL;
	struct intel_vgpu_type *type;
	struct device *pdev;
	void *gvt;
	int ret;

	pdev = mdev_parent_dev(mdev);
	gvt = kdev_to_i915(pdev)->gvt;

	type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
	if (!type) {
		gvt_vgpu_err("failed to find type %s to create\n",
						kobject_name(kobj));
		ret = -EINVAL;
		goto out;
	}

	vgpu = intel_gvt_ops->vgpu_create(gvt, type);
	if (IS_ERR_OR_NULL(vgpu)) {
		ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
		gvt_err("failed to create intel vgpu: %d\n", ret);
		goto out;
	}

	INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);

	vgpu->vdev.mdev = mdev;
	mdev_set_drvdata(mdev, vgpu);

	gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
		     dev_name(mdev_dev(mdev)));
	ret = 0;

out:
	return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	if (handle_valid(vgpu->handle))
		return -EBUSY;

	intel_gvt_ops->vgpu_destroy(vgpu);
	return 0;
}

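/*
 * VFIO notifies us when userspace unmaps an IOVA range.  Walk the affected
 * pfns and tear down any cached DMA mappings that fall inside the range,
 * so the device cannot keep using pages the guest has given back.
 */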
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.iommu_notifier);

	if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
		struct vfio_iommu_type1_dma_unmap *unmap = data;
		struct gvt_dma *entry;
		unsigned long iov_pfn, end_iov_pfn;

		iov_pfn = unmap->iova >> PAGE_SHIFT;
		end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;

		mutex_lock(&vgpu->vdev.cache_lock);
		for (; iov_pfn < end_iov_pfn; iov_pfn++) {
			entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
			if (!entry)
				continue;

			gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
					   entry->size);
			__gvt_cache_remove_entry(vgpu, entry);
		}
		mutex_unlock(&vgpu->vdev.cache_lock);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct intel_vgpu *vgpu = container_of(nb,
					struct intel_vgpu,
					vdev.group_notifier);

	/* the only action we care about */
	if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
		vgpu->vdev.kvm = data;

		if (!data)
			schedule_work(&vgpu->vdev.release_work);
	}

	return NOTIFY_OK;
}

static int intel_vgpu_open(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned long events;
	int ret;

	vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
	vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;

	events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
				&vgpu->vdev.iommu_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
			ret);
		goto out;
	}

	events = VFIO_GROUP_NOTIFY_SET_KVM;
	ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
				&vgpu->vdev.group_notifier);
	if (ret != 0) {
		gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
			ret);
		goto undo_iommu;
	}

	/* Take a module reference, as the mdev core doesn't take one for
	 * the vendor driver.
	 */
	if (!try_module_get(THIS_MODULE)) {
		ret = -ENODEV;
		goto undo_group;
	}

	ret = kvmgt_guest_init(mdev);
	if (ret)
		goto undo_group;

	intel_gvt_ops->vgpu_activate(vgpu);

	atomic_set(&vgpu->vdev.released, 0);
	return ret;

undo_group:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);

undo_iommu:
	vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
out:
	return ret;
}

static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
{
	struct eventfd_ctx *trigger;

	trigger = vgpu->vdev.msi_trigger;
	if (trigger) {
		eventfd_ctx_put(trigger);
		vgpu->vdev.msi_trigger = NULL;
	}
}

static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
	struct kvmgt_guest_info *info;
	int ret;

	if (!handle_valid(vgpu->handle))
		return;

	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
		return;

	intel_gvt_ops->vgpu_release(vgpu);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
					&vgpu->vdev.iommu_notifier);
	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);

	ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
					&vgpu->vdev.group_notifier);
	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);

	/* drop the module reference taken at open */
	module_put(THIS_MODULE);

	info = (struct kvmgt_guest_info *)vgpu->handle;
	kvmgt_guest_exit(info);

	intel_vgpu_release_msi_eventfd_ctx(vgpu);

	vgpu->vdev.kvm = NULL;
	vgpu->handle = 0;
}

static void intel_vgpu_release(struct mdev_device *mdev)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	__intel_vgpu_release(vgpu);
}

static void intel_vgpu_release_work(struct work_struct *work)
{
	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
					vdev.release_work);

	__intel_vgpu_release(vgpu);
}

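/*
 * Read a BAR base address out of the vGPU's virtual config space,
 * combining the upper dword when the BAR is a 64-bit memory BAR.
 */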
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
{
	u32 start_lo, start_hi;
	u32 mem_type;

	start_lo = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
	mem_type = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_TYPE_MASK;

	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		start_hi = (*(u32 *)(vgpu->cfg_space.virtual_cfg_space
						+ bar + 4));
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
	default:
		/* mem unknown type treated as 32-bit BAR */
		start_hi = 0;
		break;
	}

	return ((u64)start_hi << 32) | start_lo;
}

static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
			     void *buf, unsigned int count, bool is_write)
{
	u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
	int ret;

	if (is_write)
		ret = intel_gvt_ops->emulate_mmio_write(vgpu,
					bar_start + off, buf, count);
	else
		ret = intel_gvt_ops->emulate_mmio_read(vgpu,
					bar_start + off, buf, count);
	return ret;
}

static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
{
	return off >= vgpu_aperture_offset(vgpu) &&
	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
}

static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
		void *buf, unsigned long count, bool is_write)
{
	void __iomem *aperture_va;

	if (!intel_vgpu_in_aperture(vgpu, off) ||
	    !intel_vgpu_in_aperture(vgpu, off + count)) {
		gvt_vgpu_err("Invalid aperture offset %llu\n", off);
		return -EINVAL;
	}

	aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
					ALIGN_DOWN(off, PAGE_SIZE),
					count + offset_in_page(off));
	if (!aperture_va)
		return -EIO;

	if (is_write)
		memcpy_toio(aperture_va + offset_in_page(off), buf, count);
	else
		memcpy_fromio(buf, aperture_va + offset_in_page(off), count);

	io_mapping_unmap(aperture_va);

	return 0;
}

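/*
 * Central read/write dispatcher: decode the VFIO region index from *ppos
 * and hand the access to config-space emulation, BAR0 MMIO emulation, the
 * BAR2 aperture, or a device-specific region's ops.
 */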
static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
			size_t count, loff_t *ppos, bool is_write)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int ret = -EINVAL;

	if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
		gvt_vgpu_err("invalid index: %u\n", index);
		return -EINVAL;
	}

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		if (is_write)
			ret = intel_gvt_ops->emulate_cfg_write(vgpu, pos,
						buf, count);
		else
			ret = intel_gvt_ops->emulate_cfg_read(vgpu, pos,
						buf, count);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		ret = intel_vgpu_bar_rw(vgpu, PCI_BASE_ADDRESS_0, pos,
					buf, count, is_write);
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:
		ret = intel_vgpu_aperture_rw(vgpu, pos, buf, count, is_write);
		break;
	case VFIO_PCI_BAR1_REGION_INDEX:
	case VFIO_PCI_BAR3_REGION_INDEX:
	case VFIO_PCI_BAR4_REGION_INDEX:
	case VFIO_PCI_BAR5_REGION_INDEX:
	case VFIO_PCI_VGA_REGION_INDEX:
	case VFIO_PCI_ROM_REGION_INDEX:
		break;
	default:
		if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
			return -EINVAL;

		index -= VFIO_PCI_NUM_REGIONS;
		return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
				ppos, is_write);
	}

	return ret == 0 ? count : ret;
}

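/*
 * Return true when an access targets a GGTT entry inside BAR0, so callers
 * can keep 8-byte GGTT updates atomic instead of splitting them.
 */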
static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
{
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct intel_gvt *gvt = vgpu->gvt;
	int offset;

	/* Only allow MMIO GGTT entry access */
	if (index != VFIO_PCI_BAR0_REGION_INDEX)
		return false;

	offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
		intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);

	return offset >= gvt->device_info.gtt_start_offset &&
	       offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt);
}

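/*
 * Split a userspace read into naturally aligned 1/2/4-byte accesses, with
 * one exception: reads that land on a GGTT entry are done as a single
 * 8-byte access so the entry is observed atomically.
 */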
static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		/* Only support GGTT entry 8 bytes read */
		if (count >= 8 && !(*ppos % 8) &&
			gtt_entry(mdev, ppos)) {
			u64 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 8;
		} else if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val), ppos,
					false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t intel_vgpu_write(struct mdev_device *mdev,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		/* Only support GGTT entry 8 bytes write */
		if (count >= 8 && !(*ppos % 8) &&
			gtt_entry(mdev, ppos)) {
			u64 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 8;
		} else if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, (char *)&val,
					sizeof(val), ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = intel_vgpu_rw(mdev, &val, sizeof(val),
					ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

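/*
 * Only BAR2 (the GM aperture) may be mmapped, and only with a shared
 * mapping that stays inside the vGPU's slice of the aperture; the range is
 * then handed straight to remap_pfn_range() against the physical aperture.
 */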
static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
	unsigned int index;
	u64 virtaddr;
	unsigned long req_size, pgoff, req_start;
	pgprot_t pg_prot;
	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index != VFIO_PCI_BAR2_REGION_INDEX)
		return -EINVAL;

	pg_prot = vma->vm_page_prot;
	virtaddr = vma->vm_start;
	req_size = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (!intel_vgpu_in_aperture(vgpu, req_start))
		return -EINVAL;
	if (req_start + req_size >
	    vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
		return -EINVAL;

	pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
}

static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
{
	if (type == VFIO_PCI_INTX_IRQ_INDEX || type == VFIO_PCI_MSI_IRQ_INDEX)
		return 1;

	return 0;
}

static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, u32 flags,
			void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
			unsigned int index, unsigned int start,
			unsigned int count, u32 flags, void *data)
{
	return 0;
}

static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		u32 flags, void *data)
{
	return 0;
}

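/*
 * Wire up (or tear down) the eventfd backing the vGPU's MSI.  The stored
 * eventfd_ctx is signalled elsewhere in kvmgt to inject an MSI into the
 * guest.
 */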
static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
		unsigned int index, unsigned int start, unsigned int count,
		u32 flags, void *data)
{
	struct eventfd_ctx *trigger;

	if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
		int fd = *(int *)data;

		trigger = eventfd_ctx_fdget(fd);
		if (IS_ERR(trigger)) {
			gvt_vgpu_err("eventfd_ctx_fdget failed\n");
			return PTR_ERR(trigger);
		}
		vgpu->vdev.msi_trigger = trigger;
	} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
		intel_vgpu_release_msi_eventfd_ctx(vgpu);

	return 0;
}

static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
		unsigned int index, unsigned int start, unsigned int count,
		void *data)
{
	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
			unsigned int start, unsigned int count, u32 flags,
			void *data) = NULL;

	switch (index) {
	case VFIO_PCI_INTX_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
			func = intel_vgpu_set_intx_mask;
			break;
		case VFIO_IRQ_SET_ACTION_UNMASK:
			func = intel_vgpu_set_intx_unmask;
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_intx_trigger;
			break;
		}
		break;
	case VFIO_PCI_MSI_IRQ_INDEX:
		switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
		case VFIO_IRQ_SET_ACTION_MASK:
		case VFIO_IRQ_SET_ACTION_UNMASK:
			/* XXX Need masking support exported */
			break;
		case VFIO_IRQ_SET_ACTION_TRIGGER:
			func = intel_vgpu_set_msi_trigger;
			break;
		}
		break;
	}

	if (!func)
		return -ENOTTY;

	return func(vgpu, index, start, count, flags, data);
}

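/*
 * Main mdev ioctl handler.  It implements the usual VFIO device ioctls
 * (device/region/irq info, VFIO_DEVICE_SET_IRQS, VFIO_DEVICE_RESET) plus
 * the dma-buf based plane query/export ioctls used for guest framebuffer
 * sharing.
 */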
1275 static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
1276                              unsigned long arg)
1277 {
1278         struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
1279         unsigned long minsz;
1280
1281         gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
1282
1283         if (cmd == VFIO_DEVICE_GET_INFO) {
1284                 struct vfio_device_info info;
1285
1286                 minsz = offsetofend(struct vfio_device_info, num_irqs);
1287
1288                 if (copy_from_user(&info, (void __user *)arg, minsz))
1289                         return -EFAULT;
1290
1291                 if (info.argsz < minsz)
1292                         return -EINVAL;
1293
1294                 info.flags = VFIO_DEVICE_FLAGS_PCI;
1295                 info.flags |= VFIO_DEVICE_FLAGS_RESET;
1296                 info.num_regions = VFIO_PCI_NUM_REGIONS +
1297                                 vgpu->vdev.num_regions;
1298                 info.num_irqs = VFIO_PCI_NUM_IRQS;
1299
1300                 return copy_to_user((void __user *)arg, &info, minsz) ?
1301                         -EFAULT : 0;
1302
1303         } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
1304                 struct vfio_region_info info;
1305                 struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
1306                 unsigned int i;
1307                 int ret;
1308                 struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
1309                 int nr_areas = 1;
1310                 int cap_type_id;
1311
1312                 minsz = offsetofend(struct vfio_region_info, offset);
1313
1314                 if (copy_from_user(&info, (void __user *)arg, minsz))
1315                         return -EFAULT;
1316
1317                 if (info.argsz < minsz)
1318                         return -EINVAL;
1319
1320                 switch (info.index) {
1321                 case VFIO_PCI_CONFIG_REGION_INDEX:
1322                         info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1323                         info.size = vgpu->gvt->device_info.cfg_space_size;
1324                         info.flags = VFIO_REGION_INFO_FLAG_READ |
1325                                      VFIO_REGION_INFO_FLAG_WRITE;
1326                         break;
1327                 case VFIO_PCI_BAR0_REGION_INDEX:
1328                         info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1329                         info.size = vgpu->cfg_space.bar[info.index].size;
1330                         if (!info.size) {
1331                                 info.flags = 0;
1332                                 break;
1333                         }
1334
1335                         info.flags = VFIO_REGION_INFO_FLAG_READ |
1336                                      VFIO_REGION_INFO_FLAG_WRITE;
1337                         break;
1338                 case VFIO_PCI_BAR1_REGION_INDEX:
1339                         info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1340                         info.size = 0;
1341                         info.flags = 0;
1342                         break;
1343                 case VFIO_PCI_BAR2_REGION_INDEX:
1344                         info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1345                         info.flags = VFIO_REGION_INFO_FLAG_CAPS |
1346                                         VFIO_REGION_INFO_FLAG_MMAP |
1347                                         VFIO_REGION_INFO_FLAG_READ |
1348                                         VFIO_REGION_INFO_FLAG_WRITE;
1349                         info.size = gvt_aperture_sz(vgpu->gvt);
1350
1351                         sparse = kzalloc(struct_size(sparse, areas, nr_areas),
1352                                          GFP_KERNEL);
1353                         if (!sparse)
1354                                 return -ENOMEM;
1355
1356                         sparse->header.id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
1357                         sparse->header.version = 1;
1358                         sparse->nr_areas = nr_areas;
1359                         cap_type_id = VFIO_REGION_INFO_CAP_SPARSE_MMAP;
1360                         sparse->areas[0].offset =
1361                                         PAGE_ALIGN(vgpu_aperture_offset(vgpu));
1362                         sparse->areas[0].size = vgpu_aperture_sz(vgpu);
1363                         break;
1364
1365                 case VFIO_PCI_BAR3_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
1366                         info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1367                         info.size = 0;
1368                         info.flags = 0;
1369
1370                         gvt_dbg_core("get region info bar:%d\n", info.index);
1371                         break;
1372
1373                 case VFIO_PCI_ROM_REGION_INDEX:
1374                 case VFIO_PCI_VGA_REGION_INDEX:
1375                         info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
1376                         info.size = 0;
1377                         info.flags = 0;
1378
1379                         gvt_dbg_core("get region info index:%d\n", info.index);
1380                         break;
1381                 default:
1382                         {
1383                                 struct vfio_region_info_cap_type cap_type = {
1384                                         .header.id = VFIO_REGION_INFO_CAP_TYPE,
1385                                         .header.version = 1 };
1386
1387                                 if (info.index >= VFIO_PCI_NUM_REGIONS +
1388                                                 vgpu->vdev.num_regions)
1389                                         return -EINVAL;
1390                                 info.index =
1391                                         array_index_nospec(info.index,
1392                                                         VFIO_PCI_NUM_REGIONS +
1393                                                         vgpu->vdev.num_regions);
1394
1395                                 i = info.index - VFIO_PCI_NUM_REGIONS;
1396
1397                                 info.offset =
1398                                         VFIO_PCI_INDEX_TO_OFFSET(info.index);
1399                                 info.size = vgpu->vdev.region[i].size;
1400                                 info.flags = vgpu->vdev.region[i].flags;
1401
1402                                 cap_type.type = vgpu->vdev.region[i].type;
1403                                 cap_type.subtype = vgpu->vdev.region[i].subtype;
1404
1405                                 ret = vfio_info_add_capability(&caps,
1406                                                         &cap_type.header,
1407                                                         sizeof(cap_type));
1408                                 if (ret)
1409                                         return ret;
1410                         }
1411                 }
1412
1413                 if ((info.flags & VFIO_REGION_INFO_FLAG_CAPS) && sparse) {
1414                         switch (cap_type_id) {
1415                         case VFIO_REGION_INFO_CAP_SPARSE_MMAP:
1416                                 ret = vfio_info_add_capability(&caps,
1417                                         &sparse->header,
1418                                         struct_size(sparse, areas,
1419                                                     sparse->nr_areas));
1420                                 if (ret) {
1421                                         kfree(sparse);
1422                                         return ret;
1423                                 }
1424                                 break;
1425                         default:
1426                                 kfree(sparse);
1427                                 return -EINVAL;
1428                         }
1429                 }
1430
1431                 if (caps.size) {
1432                         info.flags |= VFIO_REGION_INFO_FLAG_CAPS;
1433                         if (info.argsz < sizeof(info) + caps.size) {
1434                                 info.argsz = sizeof(info) + caps.size;
1435                                 info.cap_offset = 0;
1436                         } else {
1437                                 vfio_info_cap_shift(&caps, sizeof(info));
1438                                 if (copy_to_user((void __user *)arg +
1439                                                   sizeof(info), caps.buf,
1440                                                   caps.size)) {
1441                                         kfree(caps.buf);
1442                                         kfree(sparse);
1443                                         return -EFAULT;
1444                                 }
1445                                 info.cap_offset = sizeof(info);
1446                         }
1447
1448                         kfree(caps.buf);
1449                 }
1450
1451                 kfree(sparse);
1452                 return copy_to_user((void __user *)arg, &info, minsz) ?
1453                         -EFAULT : 0;
1454         } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
1455                 struct vfio_irq_info info;
1456
1457                 minsz = offsetofend(struct vfio_irq_info, count);
1458
1459                 if (copy_from_user(&info, (void __user *)arg, minsz))
1460                         return -EFAULT;
1461
1462                 if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
1463                         return -EINVAL;
1464
1465                 switch (info.index) {
1466                 case VFIO_PCI_INTX_IRQ_INDEX:
1467                 case VFIO_PCI_MSI_IRQ_INDEX:
1468                         break;
1469                 default:
1470                         return -EINVAL;
1471                 }
1472
1473                 info.flags = VFIO_IRQ_INFO_EVENTFD;
1474
1475                 info.count = intel_vgpu_get_irq_count(vgpu, info.index);
1476
1477                 if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
1478                         info.flags |= (VFIO_IRQ_INFO_MASKABLE |
1479                                        VFIO_IRQ_INFO_AUTOMASKED);
1480                 else
1481                         info.flags |= VFIO_IRQ_INFO_NORESIZE;
1482
1483                 return copy_to_user((void __user *)arg, &info, minsz) ?
1484                         -EFAULT : 0;
1485         } else if (cmd == VFIO_DEVICE_SET_IRQS) {
1486                 struct vfio_irq_set hdr;
1487                 u8 *data = NULL;
1488                 int ret = 0;
1489                 size_t data_size = 0;
1490
1491                 minsz = offsetofend(struct vfio_irq_set, count);
1492
1493                 if (copy_from_user(&hdr, (void __user *)arg, minsz))
1494                         return -EFAULT;
1495
1496                 if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
1497                         int max = intel_vgpu_get_irq_count(vgpu, hdr.index);
1498
1499                         ret = vfio_set_irqs_validate_and_prepare(&hdr, max,
1500                                                 VFIO_PCI_NUM_IRQS, &data_size);
1501                         if (ret) {
1502                                 gvt_vgpu_err("vfio_set_irqs_validate_and_prepare failed\n");
1503                                 return -EINVAL;
1504                         }
1505                         if (data_size) {
1506                                 data = memdup_user((void __user *)(arg + minsz),
1507                                                    data_size);
1508                                 if (IS_ERR(data))
1509                                         return PTR_ERR(data);
1510                         }
1511                 }
1512
1513                 ret = intel_vgpu_set_irqs(vgpu, hdr.flags, hdr.index,
1514                                         hdr.start, hdr.count, data);
1515                 kfree(data);
1516
1517                 return ret;
1518         } else if (cmd == VFIO_DEVICE_RESET) {
1519                 intel_gvt_ops->vgpu_reset(vgpu);
1520                 return 0;
1521         } else if (cmd == VFIO_DEVICE_QUERY_GFX_PLANE) {
1522                 struct vfio_device_gfx_plane_info dmabuf;
1523                 int ret = 0;
1524
1525                 minsz = offsetofend(struct vfio_device_gfx_plane_info,
1526                                     dmabuf_id);
1527                 if (copy_from_user(&dmabuf, (void __user *)arg, minsz))
1528                         return -EFAULT;
1529                 if (dmabuf.argsz < minsz)
1530                         return -EINVAL;
1531
1532                 ret = intel_gvt_ops->vgpu_query_plane(vgpu, &dmabuf);
1533                 if (ret)
1534                         return ret;
1535
1536                 return copy_to_user((void __user *)arg, &dmabuf, minsz) ?
1537                                                                 -EFAULT : 0;
1538         } else if (cmd == VFIO_DEVICE_GET_GFX_DMABUF) {
1539                 __u32 dmabuf_id;
1540                 __s32 dmabuf_fd;
1541
1542                 if (get_user(dmabuf_id, (__u32 __user *)arg))
1543                         return -EFAULT;
1544
1545                 dmabuf_fd = intel_gvt_ops->vgpu_get_dmabuf(vgpu, dmabuf_id);
1546                 return dmabuf_fd;
1547
1548         }
1549
1550         return -ENOTTY;
1551 }
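
     /*
      * For reference, userspace reaches the ioctl handlers above through
      * the standard VFIO device ioctls. A minimal sketch (device_fd is a
      * hypothetical descriptor obtained via VFIO_GROUP_GET_DEVICE_FD,
      * error handling omitted):
      *
      *     struct vfio_irq_info info = {
      *             .argsz = sizeof(info),
      *             .index = VFIO_PCI_MSI_IRQ_INDEX,
      *     };
      *     if (ioctl(device_fd, VFIO_DEVICE_GET_IRQ_INFO, &info) == 0)
      *             printf("MSI vectors: %u\n", info.count);
      */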
1552
1553 static ssize_t
1554 vgpu_id_show(struct device *dev, struct device_attribute *attr,
1555              char *buf)
1556 {
1557         struct mdev_device *mdev = mdev_from_dev(dev);
1558
1559         if (mdev) {
1560                 struct intel_vgpu *vgpu = (struct intel_vgpu *)
1561                         mdev_get_drvdata(mdev);
1562                 return sprintf(buf, "%d\n", vgpu->id);
1563         }
1564         return sprintf(buf, "\n");
1565 }
1566
1567 static DEVICE_ATTR_RO(vgpu_id);
1568
1569 static struct attribute *intel_vgpu_attrs[] = {
1570         &dev_attr_vgpu_id.attr,
1571         NULL
1572 };
1573
1574 static const struct attribute_group intel_vgpu_group = {
1575         .name = "intel_vgpu",
1576         .attrs = intel_vgpu_attrs,
1577 };
1578
1579 static const struct attribute_group *intel_vgpu_groups[] = {
1580         &intel_vgpu_group,
1581         NULL,
1582 };
1583
1584 static struct mdev_parent_ops intel_vgpu_ops = {
1585         .mdev_attr_groups       = intel_vgpu_groups,
1586         .create                 = intel_vgpu_create,
1587         .remove                 = intel_vgpu_remove,
1588
1589         .open                   = intel_vgpu_open,
1590         .release                = intel_vgpu_release,
1591
1592         .read                   = intel_vgpu_read,
1593         .write                  = intel_vgpu_write,
1594         .mmap                   = intel_vgpu_mmap,
1595         .ioctl                  = intel_vgpu_ioctl,
1596 };
1597
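     /*
      * Register @dev as an mdev parent device: store the GVT ops table
      * handed in by the core, fetch the supported vGPU type attribute
      * groups and expose them through intel_vgpu_ops.
      */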
1598 static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
1599 {
1600         struct attribute_group **kvm_vgpu_type_groups;
1601
1602         intel_gvt_ops = ops;
1603         if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
1604                 return -EFAULT;
1605         intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
1606
1607         return mdev_register_device(dev, &intel_vgpu_ops);
1608 }
1609
1610 static void kvmgt_host_exit(struct device *dev)
1611 {
1612         mdev_unregister_device(dev);
1613 }
1614
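     /*
      * Start write-protecting @gfn with KVM's page-track framework so that
      * guest writes to it are trapped and forwarded to
      * kvmgt_page_track_write(). Already-protected gfns are skipped; the
      * protect table keeps the bookkeeping under kvm->mmu_lock.
      */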
1615 static int kvmgt_page_track_add(unsigned long handle, u64 gfn)
1616 {
1617         struct kvmgt_guest_info *info;
1618         struct kvm *kvm;
1619         struct kvm_memory_slot *slot;
1620         int idx;
1621
1622         if (!handle_valid(handle))
1623                 return -ESRCH;
1624
1625         info = (struct kvmgt_guest_info *)handle;
1626         kvm = info->kvm;
1627
1628         idx = srcu_read_lock(&kvm->srcu);
1629         slot = gfn_to_memslot(kvm, gfn);
1630         if (!slot) {
1631                 srcu_read_unlock(&kvm->srcu, idx);
1632                 return -EINVAL;
1633         }
1634
1635         spin_lock(&kvm->mmu_lock);
1636
1637         if (kvmgt_gfn_is_write_protected(info, gfn))
1638                 goto out;
1639
1640         kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1641         kvmgt_protect_table_add(info, gfn);
1642
1643 out:
1644         spin_unlock(&kvm->mmu_lock);
1645         srcu_read_unlock(&kvm->srcu, idx);
1646         return 0;
1647 }
1648
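     /* Stop write-protecting @gfn and drop it from the protect table. */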
1649 static int kvmgt_page_track_remove(unsigned long handle, u64 gfn)
1650 {
1651         struct kvmgt_guest_info *info;
1652         struct kvm *kvm;
1653         struct kvm_memory_slot *slot;
1654         int idx;
1655
1656         if (!handle_valid(handle))
1657                 return 0;
1658
1659         info = (struct kvmgt_guest_info *)handle;
1660         kvm = info->kvm;
1661
1662         idx = srcu_read_lock(&kvm->srcu);
1663         slot = gfn_to_memslot(kvm, gfn);
1664         if (!slot) {
1665                 srcu_read_unlock(&kvm->srcu, idx);
1666                 return -EINVAL;
1667         }
1668
1669         spin_lock(&kvm->mmu_lock);
1670
1671         if (!kvmgt_gfn_is_write_protected(info, gfn))
1672                 goto out;
1673
1674         kvm_slot_page_track_remove_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
1675         kvmgt_protect_table_del(info, gfn);
1676
1677 out:
1678         spin_unlock(&kvm->mmu_lock);
1679         srcu_read_unlock(&kvm->srcu, idx);
1680         return 0;
1681 }
1682
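     /*
      * KVM page-track notifier: called after an emulated guest write.
      * Forward the write to GVT's handler if the page is still tracked.
      */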
1683 static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1684                 const u8 *val, int len,
1685                 struct kvm_page_track_notifier_node *node)
1686 {
1687         struct kvmgt_guest_info *info = container_of(node,
1688                                         struct kvmgt_guest_info, track_node);
1689
1690         if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
1691                 intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
1692                                                      (void *)val, len);
1693 }
1694
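     /*
      * KVM page-track notifier: the memslot is being flushed, so drop
      * write protection for every tracked gfn that falls inside it.
      */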
1695 static void kvmgt_page_track_flush_slot(struct kvm *kvm,
1696                 struct kvm_memory_slot *slot,
1697                 struct kvm_page_track_notifier_node *node)
1698 {
1699         int i;
1700         gfn_t gfn;
1701         struct kvmgt_guest_info *info = container_of(node,
1702                                         struct kvmgt_guest_info, track_node);
1703
1704         spin_lock(&kvm->mmu_lock);
1705         for (i = 0; i < slot->npages; i++) {
1706                 gfn = slot->base_gfn + i;
1707                 if (kvmgt_gfn_is_write_protected(info, gfn)) {
1708                         kvm_slot_page_track_remove_page(kvm, slot, gfn,
1709                                                 KVM_PAGE_TRACK_WRITE);
1710                         kvmgt_protect_table_del(info, gfn);
1711                 }
1712         }
1713         spin_unlock(&kvm->mmu_lock);
1714 }
1715
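     /* Return true if @kvm is already bound to some active vGPU. */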
1716 static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu, struct kvm *kvm)
1717 {
1718         struct intel_vgpu *itr;
1719         struct kvmgt_guest_info *info;
1720         int id;
1721         bool ret = false;
1722
1723         mutex_lock(&vgpu->gvt->lock);
1724         for_each_active_vgpu(vgpu->gvt, itr, id) {
1725                 if (!handle_valid(itr->handle))
1726                         continue;
1727
1728                 info = (struct kvmgt_guest_info *)itr->handle;
1729                 if (kvm && kvm == info->kvm) {
1730                         ret = true;
1731                         goto out;
1732                 }
1733         }
1734 out:
1735         mutex_unlock(&vgpu->gvt->lock);
1736         return ret;
1737 }
1738
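     /*
      * Bind the vGPU to the VM's KVM instance: allocate and populate
      * kvmgt_guest_info, grab a reference on the kvm, initialize the
      * protect table and DMA cache, and register the page-track notifier.
      * Fails if the vGPU is already initialized or the KVM instance is
      * already in use by another vGPU.
      */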
1739 static int kvmgt_guest_init(struct mdev_device *mdev)
1740 {
1741         struct kvmgt_guest_info *info;
1742         struct intel_vgpu *vgpu;
1743         struct kvm *kvm;
1744
1745         vgpu = mdev_get_drvdata(mdev);
1746         if (handle_valid(vgpu->handle))
1747                 return -EEXIST;
1748
1749         kvm = vgpu->vdev.kvm;
1750         if (!kvm || kvm->mm != current->mm) {
1751                 gvt_vgpu_err("KVM is required to use Intel vGPU\n");
1752                 return -ESRCH;
1753         }
1754
1755         if (__kvmgt_vgpu_exist(vgpu, kvm))
1756                 return -EEXIST;
1757
1758         info = vzalloc(sizeof(struct kvmgt_guest_info));
1759         if (!info)
1760                 return -ENOMEM;
1761
1762         vgpu->handle = (unsigned long)info;
1763         info->vgpu = vgpu;
1764         info->kvm = kvm;
1765         kvm_get_kvm(info->kvm);
1766
1767         kvmgt_protect_table_init(info);
1768         gvt_cache_init(vgpu);
1769
1770         info->track_node.track_write = kvmgt_page_track_write;
1771         info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
1772         kvm_page_track_register_notifier(kvm, &info->track_node);
1773
1774         info->debugfs_cache_entries = debugfs_create_ulong(
1775                                                 "kvmgt_nr_cache_entries",
1776                                                 0444, vgpu->debugfs,
1777                                                 &vgpu->vdev.nr_cache_entries);
1778         return 0;
1779 }
1780
1781 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
1782 {
1783         debugfs_remove(info->debugfs_cache_entries);
1784
1785         kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
1786         kvm_put_kvm(info->kvm);
1787         kvmgt_protect_table_destroy(info);
1788         gvt_cache_destroy(info->vgpu);
1789         vfree(info);
1790
1791         return true;
1792 }
1793
1794 static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
1795 {
1796         /* nothing to do here */
1797         return 0;
1798 }
1799
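     /* Release every device-specific VFIO region registered for the vGPU. */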
1800 static void kvmgt_detach_vgpu(void *p_vgpu)
1801 {
1802         int i;
1803         struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
1804
1805         if (!vgpu->vdev.region)
1806                 return;
1807
1808         for (i = 0; i < vgpu->vdev.num_regions; i++)
1809                 if (vgpu->vdev.region[i].ops->release)
1810                         vgpu->vdev.region[i].ops->release(vgpu,
1811                                         &vgpu->vdev.region[i]);
1812         vgpu->vdev.num_regions = 0;
1813         kfree(vgpu->vdev.region);
1814         vgpu->vdev.region = NULL;
1815 }
1816
1817 static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
1818 {
1819         struct kvmgt_guest_info *info;
1820         struct intel_vgpu *vgpu;
1821
1822         if (!handle_valid(handle))
1823                 return -ESRCH;
1824
1825         info = (struct kvmgt_guest_info *)handle;
1826         vgpu = info->vgpu;
1827
1828         /*
1829          * When the guest powers off, msi_trigger is set to NULL, but the
1830          * vgpu's config and mmio registers are not restored to their
1831          * defaults. If this vgpu is reused by the next VM, its pipes may
1832          * still be enabled, and once the vgpu becomes active again it can
1833          * receive vblank interrupt requests. msi_trigger, however, stays
1834          * NULL until the guest enables MSI, so if it is NULL here, return
1835          * success without injecting an interrupt into the guest.
1836          */
1837         if (!vgpu->vdev.msi_trigger)
1838                 return 0;
1839
1840         if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
1841                 return 0;
1842
1843         return -EFAULT;
1844 }
1845
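     /* Translate a guest frame number to a host pfn via KVM's memslots. */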
1846 static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
1847 {
1848         struct kvmgt_guest_info *info;
1849         kvm_pfn_t pfn;
1850
1851         if (!handle_valid(handle))
1852                 return INTEL_GVT_INVALID_ADDR;
1853
1854         info = (struct kvmgt_guest_info *)handle;
1855
1856         pfn = gfn_to_pfn(info->kvm, gfn);
1857         if (is_error_noslot_pfn(pfn))
1858                 return INTEL_GVT_INVALID_ADDR;
1859
1860         return pfn;
1861 }
1862
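     /*
      * Map a guest page for DMA, caching the mapping per vGPU and keyed by
      * gfn: reuse an existing entry of the same size (taking a reference),
      * remap if the cached size differs, otherwise create a new mapping
      * and add it to the cache.
      */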
1863 static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
1864                 unsigned long size, dma_addr_t *dma_addr)
1865 {
1866         struct kvmgt_guest_info *info;
1867         struct intel_vgpu *vgpu;
1868         struct gvt_dma *entry;
1869         int ret;
1870
1871         if (!handle_valid(handle))
1872                 return -EINVAL;
1873
1874         info = (struct kvmgt_guest_info *)handle;
1875         vgpu = info->vgpu;
1876
1877         mutex_lock(&info->vgpu->vdev.cache_lock);
1878
1879         entry = __gvt_cache_find_gfn(info->vgpu, gfn);
1880         if (!entry) {
1881                 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1882                 if (ret)
1883                         goto err_unlock;
1884
1885                 ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
1886                 if (ret)
1887                         goto err_unmap;
1888         } else if (entry->size != size) {
1889                 /* the same gfn with different size: unmap and re-map */
1890                 gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
1891                 __gvt_cache_remove_entry(vgpu, entry);
1892
1893                 ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
1894                 if (ret)
1895                         goto err_unlock;
1896
1897                 ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
1898                 if (ret)
1899                         goto err_unmap;
1900         } else {
1901                 kref_get(&entry->ref);
1902                 *dma_addr = entry->dma_addr;
1903         }
1904
1905         mutex_unlock(&info->vgpu->vdev.cache_lock);
1906         return 0;
1907
1908 err_unmap:
1909         gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
1910 err_unlock:
1911         mutex_unlock(&info->vgpu->vdev.cache_lock);
1912         return ret;
1913 }
1914
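     /* kref release callback: unmap the page and drop the cache entry. */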
1915 static void __gvt_dma_release(struct kref *ref)
1916 {
1917         struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);
1918
1919         gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
1920                            entry->size);
1921         __gvt_cache_remove_entry(entry->vgpu, entry);
1922 }
1923
1924 static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
1925 {
1926         struct kvmgt_guest_info *info;
1927         struct gvt_dma *entry;
1928
1929         if (!handle_valid(handle))
1930                 return;
1931
1932         info = (struct kvmgt_guest_info *)handle;
1933
1934         mutex_lock(&info->vgpu->vdev.cache_lock);
1935         entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
1936         if (entry)
1937                 kref_put(&entry->ref, __gvt_dma_release);
1938         mutex_unlock(&info->vgpu->vdev.cache_lock);
1939 }
1940
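     /*
      * Read or write guest physical memory through KVM. When called from a
      * kernel thread (current->mm == NULL), temporarily adopt the VM's mm
      * with use_mm() so the kvm accessors can resolve user addresses.
      */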
1941 static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
1942                         void *buf, unsigned long len, bool write)
1943 {
1944         struct kvmgt_guest_info *info;
1945         struct kvm *kvm;
1946         int idx, ret;
1947         bool kthread = current->mm == NULL;
1948
1949         if (!handle_valid(handle))
1950                 return -ESRCH;
1951
1952         info = (struct kvmgt_guest_info *)handle;
1953         kvm = info->kvm;
1954
1955         if (kthread) {
1956                 if (!mmget_not_zero(kvm->mm))
1957                         return -EFAULT;
1958                 use_mm(kvm->mm);
1959         }
1960
1961         idx = srcu_read_lock(&kvm->srcu);
1962         ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
1963                       kvm_read_guest(kvm, gpa, buf, len);
1964         srcu_read_unlock(&kvm->srcu, idx);
1965
1966         if (kthread) {
1967                 unuse_mm(kvm->mm);
1968                 mmput(kvm->mm);
1969         }
1970
1971         return ret;
1972 }
1973
1974 static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
1975                         void *buf, unsigned long len)
1976 {
1977         return kvmgt_rw_gpa(handle, gpa, buf, len, false);
1978 }
1979
1980 static int kvmgt_write_gpa(unsigned long handle, unsigned long gpa,
1981                         void *buf, unsigned long len)
1982 {
1983         return kvmgt_rw_gpa(handle, gpa, buf, len, true);
1984 }
1985
1986 static unsigned long kvmgt_virt_to_pfn(void *addr)
1987 {
1988         return PFN_DOWN(__pa(addr));
1989 }
1990
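     /* A gfn is valid iff it falls inside a visible KVM memslot. */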
1991 static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
1992 {
1993         struct kvmgt_guest_info *info;
1994         struct kvm *kvm;
1995         int idx;
1996         bool ret;
1997
1998         if (!handle_valid(handle))
1999                 return false;
2000
2001         info = (struct kvmgt_guest_info *)handle;
2002         kvm = info->kvm;
2003
2004         idx = srcu_read_lock(&kvm->srcu);
2005         ret = kvm_is_visible_gfn(kvm, gfn);
2006         srcu_read_unlock(&kvm->srcu, idx);
2007
2008         return ret;
2009 }
2010
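     /*
      * Mediated pass-through (MPT) hooks: the table through which
      * hypervisor-agnostic GVT code reaches these KVM-specific services.
      * Registered with intel_gvt_register_hypervisor() at module init.
      */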
2011 static struct intel_gvt_mpt kvmgt_mpt = {
2012         .type = INTEL_GVT_HYPERVISOR_KVM,
2013         .host_init = kvmgt_host_init,
2014         .host_exit = kvmgt_host_exit,
2015         .attach_vgpu = kvmgt_attach_vgpu,
2016         .detach_vgpu = kvmgt_detach_vgpu,
2017         .inject_msi = kvmgt_inject_msi,
2018         .from_virt_to_mfn = kvmgt_virt_to_pfn,
2019         .enable_page_track = kvmgt_page_track_add,
2020         .disable_page_track = kvmgt_page_track_remove,
2021         .read_gpa = kvmgt_read_gpa,
2022         .write_gpa = kvmgt_write_gpa,
2023         .gfn_to_mfn = kvmgt_gfn_to_pfn,
2024         .dma_map_guest_page = kvmgt_dma_map_guest_page,
2025         .dma_unmap_guest_page = kvmgt_dma_unmap_guest_page,
2026         .set_opregion = kvmgt_set_opregion,
2027         .set_edid = kvmgt_set_edid,
2028         .get_vfio_device = kvmgt_get_vfio_device,
2029         .put_vfio_device = kvmgt_put_vfio_device,
2030         .is_valid_gfn = kvmgt_is_valid_gfn,
2031 };
2032
2033 static int __init kvmgt_init(void)
2034 {
2035         if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
2036                 return -ENODEV;
2037         return 0;
2038 }
2039
2040 static void __exit kvmgt_exit(void)
2041 {
2042         intel_gvt_unregister_hypervisor();
2043 }
2044
2045 module_init(kvmgt_init);
2046 module_exit(kvmgt_exit);
2047
2048 MODULE_LICENSE("GPL and additional rights");
2049 MODULE_AUTHOR("Intel Corporation");