drivers/vhost/vdpa.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks to Michael S. Tsirkin for the valuable comments and
 * suggestions, and thanks to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>

#include "vhost.h"

enum {
        VHOST_VDPA_BACKEND_FEATURES =
        (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
        (1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
        (1ULL << VHOST_BACKEND_F_IOTLB_ASID),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

#define VHOST_VDPA_IOTLB_BUCKETS 16

struct vhost_vdpa_as {
        struct hlist_node hash_link;
        struct vhost_iotlb iotlb;
        u32 id;
};

struct vhost_vdpa {
        struct vhost_dev vdev;
        struct iommu_domain *domain;
        struct vhost_virtqueue *vqs;
        struct completion completion;
        struct vdpa_device *vdpa;
        struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
        struct device dev;
        struct cdev cdev;
        atomic_t opened;
        u32 nvqs;
        int virtio_id;
        int minor;
        struct eventfd_ctx *config_ctx;
        int in_batch;
        struct vdpa_iova_range range;
        u32 batch_asid;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

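/* Map an IOTLB back to the ID of the address space that embeds it. */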
static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
{
        struct vhost_vdpa_as *as = container_of(iotlb,
                                                struct vhost_vdpa_as, iotlb);
        return as->id;
}

static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
{
        struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
        struct vhost_vdpa_as *as;

        hlist_for_each_entry(as, head, hash_link)
                if (as->id == asid)
                        return as;

        return NULL;
}

static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
{
        struct vhost_vdpa_as *as = asid_to_as(v, asid);

        if (!as)
                return NULL;

        return &as->iotlb;
}

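/*
 * Allocate a new address space for the given ASID and hook it into the
 * per-device hash table.  Fails if the ASID already exists or exceeds
 * the number of address spaces the parent device advertises.
 */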
static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
{
        struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
        struct vhost_vdpa_as *as;

        if (asid_to_as(v, asid))
                return NULL;

        if (asid >= v->vdpa->nas)
                return NULL;

        as = kmalloc(sizeof(*as), GFP_KERNEL);
        if (!as)
                return NULL;

        vhost_iotlb_init(&as->iotlb, 0, 0);
        as->id = asid;
        hlist_add_head(&as->hash_link, head);

        return as;
}

static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
                                                      u32 asid)
{
        struct vhost_vdpa_as *as = asid_to_as(v, asid);

        if (as)
                return as;

        return vhost_vdpa_alloc_as(v, asid);
}

static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
{
        struct vhost_vdpa_as *as = asid_to_as(v, asid);

        if (!as)
                return -EINVAL;

        hlist_del(&as->hash_link);
        vhost_iotlb_reset(&as->iotlb);
        kfree(as);

        return 0;
}

static void handle_vq_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
        const struct vdpa_config_ops *ops = v->vdpa->config;

        ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
        struct vhost_virtqueue *vq = private;
        struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

        if (call_ctx)
                eventfd_signal(call_ctx, 1);

        return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
        struct vhost_vdpa *v = private;
        struct eventfd_ctx *config_ctx = v->config_ctx;

        if (config_ctx)
                eventfd_signal(config_ctx, 1);

        return IRQ_HANDLED;
}

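/*
 * Try to set up irq bypass between the vq's call eventfd and the irq
 * reported by the parent device, so interrupts can be delivered without
 * bouncing through this driver.  Failure is not fatal: the eventfd path
 * still works, so we only log it.
 */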
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
        struct vhost_virtqueue *vq = &v->vqs[qid];
        const struct vdpa_config_ops *ops = v->vdpa->config;
        struct vdpa_device *vdpa = v->vdpa;
        int ret, irq;

        if (!ops->get_vq_irq)
                return;

        irq = ops->get_vq_irq(vdpa, qid);
        if (irq < 0)
                return;

        irq_bypass_unregister_producer(&vq->call_ctx.producer);
        if (!vq->call_ctx.ctx)
                return;

        vq->call_ctx.producer.token = vq->call_ctx.ctx;
        vq->call_ctx.producer.irq = irq;
        ret = irq_bypass_register_producer(&vq->call_ctx.producer);
        if (unlikely(ret))
                dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration failed, ret = %d\n",
                         qid, vq->call_ctx.producer.token, ret);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
        struct vhost_virtqueue *vq = &v->vqs[qid];

        irq_bypass_unregister_producer(&vq->call_ctx.producer);
}

static int vhost_vdpa_reset(struct vhost_vdpa *v)
{
        struct vdpa_device *vdpa = v->vdpa;

        v->in_batch = 0;

        return vdpa_reset(vdpa);
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u32 device_id;

        device_id = ops->get_device_id(vdpa);

        if (copy_to_user(argp, &device_id, sizeof(device_id)))
                return -EFAULT;

        return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u8 status;

        status = ops->get_status(vdpa);

        if (copy_to_user(statusp, &status, sizeof(status)))
                return -EFAULT;

        return 0;
}

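/*
 * Handle VHOST_VDPA_SET_STATUS: propagate a new device status to the
 * parent device, tearing down or setting up irq bypass as DRIVER_OK is
 * cleared or set.  Status bits may only be cleared by a full reset
 * (writing 0).
 */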
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u8 status, status_old;
        u32 nvqs = v->nvqs;
        int ret;
        u16 i;

        if (copy_from_user(&status, statusp, sizeof(status)))
                return -EFAULT;

        status_old = ops->get_status(vdpa);

        /*
         * Userspace shouldn't remove status bits unless it resets the
         * status to 0.
         */
        if (status != 0 && (status_old & ~status) != 0)
                return -EINVAL;

        if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
                for (i = 0; i < nvqs; i++)
                        vhost_vdpa_unsetup_vq_irq(v, i);

        if (status == 0) {
                ret = vdpa_reset(vdpa);
                if (ret)
                        return ret;
        } else
                vdpa_set_status(vdpa, status);

        if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
                for (i = 0; i < nvqs; i++)
                        vhost_vdpa_setup_vq_irq(v, i);

        return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
                                      struct vhost_vdpa_config *c)
{
        struct vdpa_device *vdpa = v->vdpa;
        size_t size = vdpa->config->get_config_size(vdpa);

        if (c->len == 0 || c->off > size)
                return -EINVAL;

        if (c->len > size - c->off)
                return -E2BIG;

        return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
                                  struct vhost_vdpa_config __user *c)
{
        struct vdpa_device *vdpa = v->vdpa;
        struct vhost_vdpa_config config;
        unsigned long size = offsetof(struct vhost_vdpa_config, buf);
        u8 *buf;

        if (copy_from_user(&config, c, size))
                return -EFAULT;
        if (vhost_vdpa_config_validate(v, &config))
                return -EINVAL;
        buf = kvzalloc(config.len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        vdpa_get_config(vdpa, config.off, buf, config.len);

        if (copy_to_user(c->buf, buf, config.len)) {
                kvfree(buf);
                return -EFAULT;
        }

        kvfree(buf);
        return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
                                  struct vhost_vdpa_config __user *c)
{
        struct vdpa_device *vdpa = v->vdpa;
        struct vhost_vdpa_config config;
        unsigned long size = offsetof(struct vhost_vdpa_config, buf);
        u8 *buf;

        if (copy_from_user(&config, c, size))
                return -EFAULT;
        if (vhost_vdpa_config_validate(v, &config))
                return -EINVAL;

        buf = vmemdup_user(c->buf, config.len);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        vdpa_set_config(vdpa, config.off, buf, config.len);

        kvfree(buf);
        return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u64 features;

        features = ops->get_device_features(vdpa);

        if (copy_to_user(featurep, &features, sizeof(features)))
                return -EFAULT;

        return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u64 features;

        /*
         * It's not allowed to change the features after they have
         * been negotiated.
         */
        if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
                return -EBUSY;

        if (copy_from_user(&features, featurep, sizeof(features)))
                return -EFAULT;

        if (vdpa_set_features(vdpa, features))
                return -EINVAL;

        return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u16 num;

        num = ops->get_vq_num_max(vdpa);

        if (copy_to_user(argp, &num, sizeof(num)))
                return -EFAULT;

        return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
        if (v->config_ctx) {
                eventfd_ctx_put(v->config_ctx);
                v->config_ctx = NULL;
        }
}

static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
        struct vdpa_callback cb;
        int fd;
        struct eventfd_ctx *ctx;

        cb.callback = vhost_vdpa_config_cb;
        cb.private = v;
        if (copy_from_user(&fd, argp, sizeof(fd)))
                return -EFAULT;

        ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
        swap(ctx, v->config_ctx);

        if (!IS_ERR_OR_NULL(ctx))
                eventfd_ctx_put(ctx);

        if (IS_ERR(v->config_ctx)) {
                long ret = PTR_ERR(v->config_ctx);

                v->config_ctx = NULL;
                return ret;
        }

        v->vdpa->config->set_config_cb(v->vdpa, &cb);

        return 0;
}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
        struct vhost_vdpa_iova_range range = {
                .first = v->range.first,
                .last = v->range.last,
        };

        if (copy_to_user(argp, &range, sizeof(range)))
                return -EFAULT;
        return 0;
}

static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u32 size;

        size = ops->get_config_size(vdpa);

        if (copy_to_user(argp, &size, sizeof(size)))
                return -EFAULT;

        return 0;
}

static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
{
        struct vdpa_device *vdpa = v->vdpa;

        if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
                return -EFAULT;

        return 0;
}

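/*
 * Per-virtqueue ioctls.  The queue index is validated and hardened
 * against speculation before use.  vDPA-specific commands are handled
 * here directly; the generic vhost ring commands are forwarded to
 * vhost_vring_ioctl() and their results are then mirrored to the
 * parent device through the config ops.
 */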
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                                   void __user *argp)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vdpa_vq_state vq_state;
        struct vdpa_callback cb;
        struct vhost_virtqueue *vq;
        struct vhost_vring_state s;
        u32 idx;
        long r;

        r = get_user(idx, (u32 __user *)argp);
        if (r < 0)
                return r;

        if (idx >= v->nvqs)
                return -ENOBUFS;

        idx = array_index_nospec(idx, v->nvqs);
        vq = &v->vqs[idx];

        switch (cmd) {
        case VHOST_VDPA_SET_VRING_ENABLE:
                if (copy_from_user(&s, argp, sizeof(s)))
                        return -EFAULT;
                ops->set_vq_ready(vdpa, idx, s.num);
                return 0;
        case VHOST_VDPA_GET_VRING_GROUP:
                s.index = idx;
                s.num = ops->get_vq_group(vdpa, idx);
                if (s.num >= vdpa->ngroups)
                        return -EIO;
                else if (copy_to_user(argp, &s, sizeof(s)))
                        return -EFAULT;
                return 0;
        case VHOST_VDPA_SET_GROUP_ASID:
                if (copy_from_user(&s, argp, sizeof(s)))
                        return -EFAULT;
                if (s.num >= vdpa->nas)
                        return -EINVAL;
                if (!ops->set_group_asid)
                        return -EOPNOTSUPP;
                return ops->set_group_asid(vdpa, idx, s.num);
        case VHOST_GET_VRING_BASE:
                r = ops->get_vq_state(v->vdpa, idx, &vq_state);
                if (r)
                        return r;

                vq->last_avail_idx = vq_state.split.avail_index;
                break;
        }

        r = vhost_vring_ioctl(&v->vdev, cmd, argp);
        if (r)
                return r;

        switch (cmd) {
        case VHOST_SET_VRING_ADDR:
                if (ops->set_vq_address(vdpa, idx,
                                        (u64)(uintptr_t)vq->desc,
                                        (u64)(uintptr_t)vq->avail,
                                        (u64)(uintptr_t)vq->used))
                        r = -EINVAL;
                break;

        case VHOST_SET_VRING_BASE:
                vq_state.split.avail_index = vq->last_avail_idx;
                if (ops->set_vq_state(vdpa, idx, &vq_state))
                        r = -EINVAL;
                break;

        case VHOST_SET_VRING_CALL:
                if (vq->call_ctx.ctx) {
                        cb.callback = vhost_vdpa_virtqueue_cb;
                        cb.private = vq;
                } else {
                        cb.callback = NULL;
                        cb.private = NULL;
                }
                ops->set_vq_cb(vdpa, idx, &cb);
                vhost_vdpa_setup_vq_irq(v, idx);
                break;

        case VHOST_SET_VRING_NUM:
                ops->set_vq_num(vdpa, idx, vq->num);
                break;
        }

        return r;
}

static long vhost_vdpa_unlocked_ioctl(struct file *filep,
                                      unsigned int cmd, unsigned long arg)
{
        struct vhost_vdpa *v = filep->private_data;
        struct vhost_dev *d = &v->vdev;
        void __user *argp = (void __user *)arg;
        u64 __user *featurep = argp;
        u64 features;
        long r = 0;

        if (cmd == VHOST_SET_BACKEND_FEATURES) {
                if (copy_from_user(&features, featurep, sizeof(features)))
                        return -EFAULT;
                if (features & ~VHOST_VDPA_BACKEND_FEATURES)
                        return -EOPNOTSUPP;
                vhost_set_backend_features(&v->vdev, features);
                return 0;
        }

        mutex_lock(&d->mutex);

        switch (cmd) {
        case VHOST_VDPA_GET_DEVICE_ID:
                r = vhost_vdpa_get_device_id(v, argp);
                break;
        case VHOST_VDPA_GET_STATUS:
                r = vhost_vdpa_get_status(v, argp);
                break;
        case VHOST_VDPA_SET_STATUS:
                r = vhost_vdpa_set_status(v, argp);
                break;
        case VHOST_VDPA_GET_CONFIG:
                r = vhost_vdpa_get_config(v, argp);
                break;
        case VHOST_VDPA_SET_CONFIG:
                r = vhost_vdpa_set_config(v, argp);
                break;
        case VHOST_GET_FEATURES:
                r = vhost_vdpa_get_features(v, argp);
                break;
        case VHOST_SET_FEATURES:
                r = vhost_vdpa_set_features(v, argp);
                break;
        case VHOST_VDPA_GET_VRING_NUM:
                r = vhost_vdpa_get_vring_num(v, argp);
                break;
        case VHOST_VDPA_GET_GROUP_NUM:
                if (copy_to_user(argp, &v->vdpa->ngroups,
                                 sizeof(v->vdpa->ngroups)))
                        r = -EFAULT;
                break;
        case VHOST_VDPA_GET_AS_NUM:
                if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)))
                        r = -EFAULT;
                break;
        case VHOST_SET_LOG_BASE:
        case VHOST_SET_LOG_FD:
                r = -ENOIOCTLCMD;
                break;
        case VHOST_VDPA_SET_CONFIG_CALL:
                r = vhost_vdpa_set_config_call(v, argp);
                break;
        case VHOST_GET_BACKEND_FEATURES:
                features = VHOST_VDPA_BACKEND_FEATURES;
                if (copy_to_user(featurep, &features, sizeof(features)))
                        r = -EFAULT;
                break;
        case VHOST_VDPA_GET_IOVA_RANGE:
                r = vhost_vdpa_get_iova_range(v, argp);
                break;
        case VHOST_VDPA_GET_CONFIG_SIZE:
                r = vhost_vdpa_get_config_size(v, argp);
                break;
        case VHOST_VDPA_GET_VQS_COUNT:
                r = vhost_vdpa_get_vqs_count(v, argp);
                break;
        default:
                r = vhost_dev_ioctl(&v->vdev, cmd, argp);
                if (r == -ENOIOCTLCMD)
                        r = vhost_vdpa_vring_ioctl(v, cmd, argp);
                break;
        }

        mutex_unlock(&d->mutex);
        return r;
}

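/*
 * Tear down the mappings that overlap [start, last] in a PA-backed
 * IOTLB: mark writable pages dirty, unpin them and release the
 * pinned-memory accounting against the owner's mm.
 */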
static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
                                struct vhost_iotlb *iotlb,
                                u64 start, u64 last)
{
        struct vhost_dev *dev = &v->vdev;
        struct vhost_iotlb_map *map;
        struct page *page;
        unsigned long pfn, pinned;

        while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
                pinned = PFN_DOWN(map->size);
                for (pfn = PFN_DOWN(map->addr);
                     pinned > 0; pfn++, pinned--) {
                        page = pfn_to_page(pfn);
                        if (map->perm & VHOST_ACCESS_WO)
                                set_page_dirty_lock(page);
                        unpin_user_page(page);
                }
                atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
                vhost_iotlb_map_free(iotlb, map);
        }
}

static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
                                struct vhost_iotlb *iotlb,
                                u64 start, u64 last)
{
        struct vhost_iotlb_map *map;
        struct vdpa_map_file *map_file;

        while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
                map_file = (struct vdpa_map_file *)map->opaque;
                fput(map_file->file);
                kfree(map_file);
                vhost_iotlb_map_free(iotlb, map);
        }
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
                                   struct vhost_iotlb *iotlb,
                                   u64 start, u64 last)
{
        struct vdpa_device *vdpa = v->vdpa;

        if (vdpa->use_va)
                return vhost_vdpa_va_unmap(v, iotlb, start, last);

        return vhost_vdpa_pa_unmap(v, iotlb, start, last);
}

static int perm_to_iommu_flags(u32 perm)
{
        int flags = 0;

        switch (perm) {
        case VHOST_ACCESS_WO:
                flags |= IOMMU_WRITE;
                break;
        case VHOST_ACCESS_RO:
                flags |= IOMMU_READ;
                break;
        case VHOST_ACCESS_RW:
                flags |= (IOMMU_WRITE | IOMMU_READ);
                break;
        default:
                WARN(1, "invalid vhost IOTLB permission\n");
                break;
        }

        return flags | IOMMU_CACHE;
}

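/*
 * Record a mapping in the IOTLB and install it in whichever DMA
 * backend the parent device provides: per-range dma_map(), a full
 * set_map() update (deferred while batching), or the platform IOMMU
 * domain as a last resort.  On failure the IOTLB entry is rolled back.
 */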
static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
                          u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
{
        struct vhost_dev *dev = &v->vdev;
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u32 asid = iotlb_to_asid(iotlb);
        int r = 0;

        r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
                                      pa, perm, opaque);
        if (r)
                return r;

        if (ops->dma_map) {
                r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
        } else if (ops->set_map) {
                if (!v->in_batch)
                        r = ops->set_map(vdpa, asid, iotlb);
        } else {
                r = iommu_map(v->domain, iova, pa, size,
                              perm_to_iommu_flags(perm));
        }
        if (r) {
                vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
                return r;
        }

        if (!vdpa->use_va)
                atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);

        return 0;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v,
                             struct vhost_iotlb *iotlb,
                             u64 iova, u64 size)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        u32 asid = iotlb_to_asid(iotlb);

        vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);

        if (ops->dma_map) {
                ops->dma_unmap(vdpa, asid, iova, size);
        } else if (ops->set_map) {
                if (!v->in_batch)
                        ops->set_map(vdpa, asid, iotlb);
        } else {
                iommu_unmap(v->domain, iova, size);
        }

        /* If we are in the middle of batch processing, delay freeing
         * the AS until BATCH_END.
         */
        if (!v->in_batch && !iotlb->nmaps)
                vhost_vdpa_remove_as(v, asid);
}

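/*
 * VA (userspace address) mapping path for devices that do their own
 * address translation: walk the VMAs backing [uaddr, uaddr + size) and
 * record one IOTLB entry per shared, file-backed VMA chunk, taking a
 * reference on the backing file for each.
 */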
static int vhost_vdpa_va_map(struct vhost_vdpa *v,
                             struct vhost_iotlb *iotlb,
                             u64 iova, u64 size, u64 uaddr, u32 perm)
{
        struct vhost_dev *dev = &v->vdev;
        u64 offset, map_size, map_iova = iova;
        struct vdpa_map_file *map_file;
        struct vm_area_struct *vma;
        int ret = 0;

        mmap_read_lock(dev->mm);

        while (size) {
                vma = find_vma(dev->mm, uaddr);
                if (!vma) {
                        ret = -EINVAL;
                        break;
                }
                map_size = min(size, vma->vm_end - uaddr);
                if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
                        !(vma->vm_flags & (VM_IO | VM_PFNMAP))))
                        goto next;

                map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
                if (!map_file) {
                        ret = -ENOMEM;
                        break;
                }
                offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
                map_file->offset = offset;
                map_file->file = get_file(vma->vm_file);
                ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
                                     perm, map_file);
                if (ret) {
                        fput(map_file->file);
                        kfree(map_file);
                        break;
                }
next:
                size -= map_size;
                uaddr += map_size;
                map_iova += map_size;
        }
        if (ret)
                vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);

        mmap_read_unlock(dev->mm);

        return ret;
}

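/*
 * PA (pinned pages) mapping path: pin the user pages backing
 * [uaddr, uaddr + size), coalesce physically contiguous runs of pages
 * into single IOTLB entries, and account the pins against the
 * RLIMIT_MEMLOCK limit of the owner's mm.
 */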
static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
                             struct vhost_iotlb *iotlb,
                             u64 iova, u64 size, u64 uaddr, u32 perm)
{
        struct vhost_dev *dev = &v->vdev;
        struct page **page_list;
        unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
        unsigned int gup_flags = FOLL_LONGTERM;
        unsigned long npages, cur_base, map_pfn, last_pfn = 0;
        unsigned long lock_limit, sz2pin, nchunks, i;
        u64 start = iova;
        long pinned;
        int ret = 0;

        /* Limit the use of memory for bookkeeping */
        page_list = (struct page **) __get_free_page(GFP_KERNEL);
        if (!page_list)
                return -ENOMEM;

        if (perm & VHOST_ACCESS_WO)
                gup_flags |= FOLL_WRITE;

        npages = PFN_UP(size + (iova & ~PAGE_MASK));
        if (!npages) {
                ret = -EINVAL;
                goto free;
        }

        mmap_read_lock(dev->mm);

        lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
        if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
                ret = -ENOMEM;
                goto unlock;
        }

        cur_base = uaddr & PAGE_MASK;
        iova &= PAGE_MASK;
        nchunks = 0;

        while (npages) {
                sz2pin = min_t(unsigned long, npages, list_size);
                pinned = pin_user_pages(cur_base, sz2pin,
                                        gup_flags, page_list, NULL);
                if (sz2pin != pinned) {
                        if (pinned < 0) {
                                ret = pinned;
                        } else {
                                unpin_user_pages(page_list, pinned);
                                ret = -ENOMEM;
                        }
                        goto out;
                }
                nchunks++;

                if (!last_pfn)
                        map_pfn = page_to_pfn(page_list[0]);

                for (i = 0; i < pinned; i++) {
                        unsigned long this_pfn = page_to_pfn(page_list[i]);
                        u64 csize;

                        if (last_pfn && (this_pfn != last_pfn + 1)) {
                                /* Map the contiguous chunk of pages pinned so far */
                                csize = PFN_PHYS(last_pfn - map_pfn + 1);
                                ret = vhost_vdpa_map(v, iotlb, iova, csize,
                                                     PFN_PHYS(map_pfn),
                                                     perm, NULL);
                                if (ret) {
                                        /*
                                         * Unpin the pages that are left unmapped
                                         * from this point on in the current
                                         * page_list. The remaining outstanding
                                         * ones which may stride across several
                                         * chunks will be covered in the common
                                         * error path subsequently.
                                         */
                                        unpin_user_pages(&page_list[i],
                                                         pinned - i);
                                        goto out;
                                }

                                map_pfn = this_pfn;
                                iova += csize;
                                nchunks = 0;
                        }

                        last_pfn = this_pfn;
                }

                cur_base += PFN_PHYS(pinned);
                npages -= pinned;
        }

        /* Map the remaining chunk */
        ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
                             PFN_PHYS(map_pfn), perm, NULL);
out:
        if (ret) {
                if (nchunks) {
                        unsigned long pfn;

                        /*
                         * Unpin the outstanding pages which were pinned
                         * but not yet mapped, due to a vhost_vdpa_map()
                         * or pin_user_pages() failure.
                         *
                         * Mapped pages are accounted in vhost_vdpa_map(),
                         * hence the corresponding unpinning is handled by
                         * vhost_vdpa_unmap().
                         */
                        WARN_ON(!last_pfn);
                        for (pfn = map_pfn; pfn <= last_pfn; pfn++)
                                unpin_user_page(pfn_to_page(pfn));
                }
                vhost_vdpa_unmap(v, iotlb, start, size);
        }
unlock:
        mmap_read_unlock(dev->mm);
free:
        free_page((unsigned long)page_list);
        return ret;
}

static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
                                           struct vhost_iotlb *iotlb,
                                           struct vhost_iotlb_msg *msg)
{
        struct vdpa_device *vdpa = v->vdpa;

        if (msg->iova < v->range.first || !msg->size ||
            msg->iova > U64_MAX - msg->size + 1 ||
            msg->iova + msg->size - 1 > v->range.last)
                return -EINVAL;

        if (vhost_iotlb_itree_first(iotlb, msg->iova,
                                    msg->iova + msg->size - 1))
                return -EEXIST;

        if (vdpa->use_va)
                return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
                                         msg->uaddr, msg->perm);

        return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
                                 msg->perm);
}

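/*
 * Entry point for IOTLB messages from userspace.  UPDATE and
 * BATCH_BEGIN may create the target address space on demand; the other
 * message types require it to exist already, and a batch may only span
 * a single ASID.
 */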
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
                                        struct vhost_iotlb_msg *msg)
{
        struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vhost_iotlb *iotlb = NULL;
        struct vhost_vdpa_as *as = NULL;
        int r = 0;

        mutex_lock(&dev->mutex);

        r = vhost_dev_check_owner(dev);
        if (r)
                goto unlock;

        if (msg->type == VHOST_IOTLB_UPDATE ||
            msg->type == VHOST_IOTLB_BATCH_BEGIN) {
                as = vhost_vdpa_find_alloc_as(v, asid);
                if (!as) {
                        dev_err(&v->dev, "can't find and alloc asid %d\n",
                                asid);
                        r = -EINVAL;
                        goto unlock;
                }
                iotlb = &as->iotlb;
        } else
                iotlb = asid_to_iotlb(v, asid);

        if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
                if (v->in_batch && v->batch_asid != asid) {
                        dev_info(&v->dev, "batch id %d asid %d\n",
                                 v->batch_asid, asid);
                }
                if (!iotlb)
                        dev_err(&v->dev, "no iotlb for asid %d\n", asid);
                r = -EINVAL;
                goto unlock;
        }

        switch (msg->type) {
        case VHOST_IOTLB_UPDATE:
                r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
                break;
        case VHOST_IOTLB_INVALIDATE:
                vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
                break;
        case VHOST_IOTLB_BATCH_BEGIN:
                v->batch_asid = asid;
                v->in_batch = true;
                break;
        case VHOST_IOTLB_BATCH_END:
                if (v->in_batch && ops->set_map)
                        ops->set_map(vdpa, asid, iotlb);
                v->in_batch = false;
                if (!iotlb->nmaps)
                        vhost_vdpa_remove_as(v, asid);
                break;
        default:
                r = -EINVAL;
                break;
        }
unlock:
        mutex_unlock(&dev->mutex);

        return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
                                         struct iov_iter *from)
{
        struct file *file = iocb->ki_filp;
        struct vhost_vdpa *v = file->private_data;
        struct vhost_dev *dev = &v->vdev;

        return vhost_chr_write_iter(dev, from);
}

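/*
 * Allocate and attach a platform IOMMU domain for devices that have no
 * DMA translation ops of their own.  The bus IOMMU must be capable of
 * cache coherency, since the mappings are installed with IOMMU_CACHE.
 */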
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct device *dma_dev = vdpa_get_dma_dev(vdpa);
        struct bus_type *bus;
        int ret;

        /* The device wants to do DMA by itself */
        if (ops->set_map || ops->dma_map)
                return 0;

        bus = dma_dev->bus;
        if (!bus)
                return -EFAULT;

        if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
                return -ENOTSUPP;

        v->domain = iommu_domain_alloc(bus);
        if (!v->domain)
                return -EIO;

        ret = iommu_attach_device(v->domain, dma_dev);
        if (ret)
                goto err_attach;

        return 0;

err_attach:
        iommu_domain_free(v->domain);
        return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
        struct vdpa_device *vdpa = v->vdpa;
        struct device *dma_dev = vdpa_get_dma_dev(vdpa);

        if (v->domain) {
                iommu_detach_device(v->domain, dma_dev);
                iommu_domain_free(v->domain);
        }

        v->domain = NULL;
}

static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
        struct vdpa_iova_range *range = &v->range;
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;

        if (ops->get_iova_range) {
                *range = ops->get_iova_range(vdpa);
        } else if (v->domain && v->domain->geometry.force_aperture) {
                range->first = v->domain->geometry.aperture_start;
                range->last = v->domain->geometry.aperture_end;
        } else {
                range->first = 0;
                range->last = ULLONG_MAX;
        }
}

static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
{
        struct vhost_vdpa_as *as;
        u32 asid;

        vhost_dev_cleanup(&v->vdev);
        kfree(v->vdev.vqs);

        for (asid = 0; asid < v->vdpa->nas; asid++) {
                as = asid_to_as(v, asid);
                if (as)
                        vhost_vdpa_remove_as(v, asid);
        }
}

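/*
 * Only one userspace opener is allowed at a time.  The device is reset
 * to a clean state on open, and the virtqueues and (if needed) the
 * IOMMU domain are set up before the fd is handed back.
 */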
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
        struct vhost_vdpa *v;
        struct vhost_dev *dev;
        struct vhost_virtqueue **vqs;
        int r, opened;
        u32 i, nvqs;

        v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

        opened = atomic_cmpxchg(&v->opened, 0, 1);
        if (opened)
                return -EBUSY;

        nvqs = v->nvqs;
        r = vhost_vdpa_reset(v);
        if (r)
                goto err;

        vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
                r = -ENOMEM;
                goto err;
        }

        dev = &v->vdev;
        for (i = 0; i < nvqs; i++) {
                vqs[i] = &v->vqs[i];
                vqs[i]->handle_kick = handle_vq_kick;
        }
        vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
                       vhost_vdpa_process_iotlb_msg);

        r = vhost_vdpa_alloc_domain(v);
        if (r)
                goto err_alloc_domain;

        vhost_vdpa_set_iova_range(v);

        filep->private_data = v;

        return 0;

err_alloc_domain:
        vhost_vdpa_cleanup(v);
err:
        atomic_dec(&v->opened);
        return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
        u32 i;

        for (i = 0; i < v->nvqs; i++)
                vhost_vdpa_unsetup_vq_irq(v, i);
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
        struct vhost_vdpa *v = filep->private_data;
        struct vhost_dev *d = &v->vdev;

        mutex_lock(&d->mutex);
        filep->private_data = NULL;
        vhost_vdpa_clean_irq(v);
        vhost_vdpa_reset(v);
        vhost_dev_stop(&v->vdev);
        vhost_vdpa_free_domain(v);
        vhost_vdpa_config_put(v);
        vhost_dev_cleanup(&v->vdev);
        mutex_unlock(&d->mutex);

        atomic_dec(&v->opened);
        complete(&v->completion);

        return 0;
}

#ifdef CONFIG_MMU
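/*
 * Lazily map a virtqueue doorbell page into userspace on first touch;
 * the VMA's pgoff selects the virtqueue whose notification area is
 * mapped.
 */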
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
        struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vdpa_notification_area notify;
        struct vm_area_struct *vma = vmf->vma;
        u16 index = vma->vm_pgoff;

        notify = ops->get_vq_notification(vdpa, index);

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
                            PFN_DOWN(notify.addr), PAGE_SIZE,
                            vma->vm_page_prot))
                return VM_FAULT_SIGBUS;

        return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
        .fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct vhost_vdpa *v = vma->vm_file->private_data;
        struct vdpa_device *vdpa = v->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vdpa_notification_area notify;
        unsigned long index = vma->vm_pgoff;

        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;
        if ((vma->vm_flags & VM_SHARED) == 0)
                return -EINVAL;
        if (vma->vm_flags & VM_READ)
                return -EINVAL;
        if (index > 65535)
                return -EINVAL;
        if (!ops->get_vq_notification)
                return -ENOTSUPP;

        /* To be safe and easily modelled by userspace, we only
         * support a doorbell that sits on a page boundary and
         * does not share its page with other registers.
         */
        notify = ops->get_vq_notification(vdpa, index);
        if (notify.addr & (PAGE_SIZE - 1))
                return -EINVAL;
        if (vma->vm_end - vma->vm_start != notify.size)
                return -ENOTSUPP;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &vhost_vdpa_vm_ops;
        return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
        .owner          = THIS_MODULE,
        .open           = vhost_vdpa_open,
        .release        = vhost_vdpa_release,
        .write_iter     = vhost_vdpa_chr_write_iter,
        .unlocked_ioctl = vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
        .mmap           = vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
        .compat_ioctl   = compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
        struct vhost_vdpa *v =
               container_of(device, struct vhost_vdpa, dev);

        ida_simple_remove(&vhost_vdpa_ida, v->minor);
        kfree(v->vqs);
        kfree(v);
}

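/*
 * vDPA bus probe: create the per-device vhost-vdpa state and expose it
 * to userspace as a /dev/vhost-vdpa-N character device.
 */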
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vhost_vdpa *v;
        int minor;
        int i, r;

        /* We can't support a platform-IOMMU device with more than
         * one group or address space.
         */
        if (!ops->set_map && !ops->dma_map &&
            (vdpa->ngroups > 1 || vdpa->nas > 1))
                return -EOPNOTSUPP;

        v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
        if (!v)
                return -ENOMEM;

        minor = ida_simple_get(&vhost_vdpa_ida, 0,
                               VHOST_VDPA_DEV_MAX, GFP_KERNEL);
        if (minor < 0) {
                kfree(v);
                return minor;
        }

        atomic_set(&v->opened, 0);
        v->minor = minor;
        v->vdpa = vdpa;
        v->nvqs = vdpa->nvqs;
        v->virtio_id = ops->get_device_id(vdpa);

        device_initialize(&v->dev);
        v->dev.release = vhost_vdpa_release_dev;
        v->dev.parent = &vdpa->dev;
        v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
        v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
                               GFP_KERNEL);
        if (!v->vqs) {
                r = -ENOMEM;
                goto err;
        }

        r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
        if (r)
                goto err;

        cdev_init(&v->cdev, &vhost_vdpa_fops);
        v->cdev.owner = THIS_MODULE;

        r = cdev_device_add(&v->cdev, &v->dev);
        if (r)
                goto err;

        init_completion(&v->completion);
        vdpa_set_drvdata(vdpa, v);

        for (i = 0; i < VHOST_VDPA_IOTLB_BUCKETS; i++)
                INIT_HLIST_HEAD(&v->as[i]);

        return 0;

err:
        put_device(&v->dev);
        return r;
}

static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
        struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
        int opened;

        cdev_device_del(&v->cdev, &v->dev);

        do {
                opened = atomic_cmpxchg(&v->opened, 0, 1);
                if (!opened)
                        break;
                wait_for_completion(&v->completion);
        } while (1);

        put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
        .driver = {
                .name   = "vhost_vdpa",
        },
        .probe  = vhost_vdpa_probe,
        .remove = vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
        int r;

        r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
                                "vhost-vdpa");
        if (r)
                goto err_alloc_chrdev;

        r = vdpa_register_driver(&vhost_vdpa_driver);
        if (r)
                goto err_vdpa_register_driver;

        return 0;

err_vdpa_register_driver:
        unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
        return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
        vdpa_unregister_driver(&vhost_vdpa_driver);
        unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");