// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <linux/iova.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 - Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
                 "Maximum number of iotlb entries. 0 means unlimited. (default: 2048)");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
        return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
        struct vdpa_device *vdpa = dev_to_vdpa(dev);

        return vdpa_to_sim(vdpa);
}

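/*
 * vringh notification hook: when the simulated ring signals a used
 * buffer, forward the event to the callback registered by the vDPA
 * bus driver through set_vq_cb(); drop it if none is set.
 */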
static void vdpasim_vq_notify(struct vringh *vring)
{
        struct vdpasim_virtqueue *vq =
                container_of(vring, struct vdpasim_virtqueue, vring);

        if (!vq->cb)
                return;

        vq->cb(vq->private);
}

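/*
 * Set up the vringh instance for a queue the driver marked ready,
 * using the guest-supplied descriptor/driver/device area addresses.
 * All accesses through this vring are translated by the simulator's
 * software IOTLB (attached with vringh_set_iotlb() in vdpasim_create()).
 */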
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
                          VDPASIM_QUEUE_MAX, false,
                          (struct vring_desc *)(uintptr_t)vq->desc_addr,
                          (struct vring_avail *)
                          (uintptr_t)vq->driver_addr,
                          (struct vring_used *)
                          (uintptr_t)vq->device_addr);

        vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
                             struct vdpasim_virtqueue *vq)
{
        vq->ready = false;
        vq->desc_addr = 0;
        vq->driver_addr = 0;
        vq->device_addr = 0;
        vq->cb = NULL;
        vq->private = NULL;
        vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
                          VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

        vq->vring.notify = NULL;
}

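/*
 * Full device reset: return each virtqueue to its initial state, drop
 * every IOTLB mapping, clear negotiated features and status, and bump
 * the config generation so drivers can detect the change.
 */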
static void vdpasim_reset(struct vdpasim *vdpasim)
{
        int i;

        for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
                vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);

        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_reset(vdpasim->iommu);
        spin_unlock(&vdpasim->iommu_lock);

        vdpasim->features = 0;
        vdpasim->status = 0;
        ++vdpasim->generation;
}

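/*
 * Map a DMA direction to vhost IOTLB permission bits, seen from the
 * device side: DMA_FROM_DEVICE means the device writes the buffer
 * (VHOST_MAP_WO), DMA_TO_DEVICE means it only reads it (VHOST_MAP_RO).
 */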
static int dir_to_perm(enum dma_data_direction dir)
{
        int perm = -EFAULT;

        switch (dir) {
        case DMA_FROM_DEVICE:
                perm = VHOST_MAP_WO;
                break;
        case DMA_TO_DEVICE:
                perm = VHOST_MAP_RO;
                break;
        case DMA_BIDIRECTIONAL:
                perm = VHOST_MAP_RW;
                break;
        default:
                break;
        }

        return perm;
}

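/*
 * Allocate an IOVA range covering @size bytes and record its mapping
 * to @paddr in the software IOTLB. If the IOTLB insertion fails, the
 * IOVA is released again so the allocator and the IOTLB stay in sync.
 */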
static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
                                    size_t size, unsigned int perm)
{
        struct iova *iova;
        dma_addr_t dma_addr;
        int ret;

        /* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
        iova = alloc_iova(&vdpasim->iova, size, ULONG_MAX - 1, true);
        if (!iova)
                return DMA_MAPPING_ERROR;

        dma_addr = iova_dma_addr(&vdpasim->iova, iova);

        spin_lock(&vdpasim->iommu_lock);
        ret = vhost_iotlb_add_range(vdpasim->iommu, (u64)dma_addr,
                                    (u64)dma_addr + size - 1, (u64)paddr, perm);
        spin_unlock(&vdpasim->iommu_lock);

        if (ret) {
                __free_iova(&vdpasim->iova, iova);
                return DMA_MAPPING_ERROR;
        }

        return dma_addr;
}

static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
                                size_t size)
{
        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(vdpasim->iommu, (u64)dma_addr,
                              (u64)dma_addr + size - 1);
        spin_unlock(&vdpasim->iommu_lock);

        free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr));
}

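/*
 * dma_map_ops backing the simulator's DMA device: instead of touching
 * real hardware, map/unmap and coherent allocation requests become
 * entries in the software IOTLB via vdpasim_map_range() and
 * vdpasim_unmap_range().
 */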
static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);
        phys_addr_t paddr = page_to_phys(page) + offset;
        int perm = dir_to_perm(dir);

        if (perm < 0)
                return DMA_MAPPING_ERROR;

        return vdpasim_map_range(vdpasim, paddr, size, perm);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);

        vdpasim_unmap_range(vdpasim, dma_addr, size);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
                                    dma_addr_t *dma_addr, gfp_t flag,
                                    unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);
        phys_addr_t paddr;
        void *addr;

        addr = kmalloc(size, flag);
        if (!addr) {
                *dma_addr = DMA_MAPPING_ERROR;
                return NULL;
        }

        paddr = virt_to_phys(addr);

        *dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW);
        if (*dma_addr == DMA_MAPPING_ERROR) {
                kfree(addr);
                return NULL;
        }

        return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
                                  void *vaddr, dma_addr_t dma_addr,
                                  unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);

        vdpasim_unmap_range(vdpasim, dma_addr, size);

        kfree(vaddr);
}

static const struct dma_map_ops vdpasim_dma_ops = {
        .map_page = vdpasim_map_page,
        .unmap_page = vdpasim_unmap_page,
        .alloc = vdpasim_alloc_coherent,
        .free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

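/*
 * Allocate and initialize a simulator instance from a vdpasim_dev_attr
 * describing the device class (queue count, config space size, work
 * function, ...). Errors are returned as ERR_PTR(); a usage sketch
 * follows the function.
 */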
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
{
        const struct vdpa_config_ops *ops;
        struct vdpasim *vdpasim;
        struct device *dev;
        int i, ret = -ENOMEM;

        if (batch_mapping)
                ops = &vdpasim_batch_config_ops;
        else
                ops = &vdpasim_config_ops;

        vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
                                    dev_attr->name);
        if (IS_ERR(vdpasim)) {
                ret = PTR_ERR(vdpasim);
                goto err_alloc;
        }

        vdpasim->dev_attr = *dev_attr;
        INIT_WORK(&vdpasim->work, dev_attr->work_fn);
        spin_lock_init(&vdpasim->lock);
        spin_lock_init(&vdpasim->iommu_lock);

        dev = &vdpasim->vdpa.dev;
        dev->dma_mask = &dev->coherent_dma_mask;
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                goto err_iommu;
        set_dma_ops(dev, &vdpasim_dma_ops);
        vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

        vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
        if (!vdpasim->config)
                goto err_iommu;

        vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
                               GFP_KERNEL);
        if (!vdpasim->vqs)
                goto err_iommu;

        vdpasim->iommu = vhost_iotlb_alloc(max_iotlb_entries, 0);
        if (!vdpasim->iommu)
                goto err_iommu;

        vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
        if (!vdpasim->buffer)
                goto err_iommu;

        for (i = 0; i < dev_attr->nvqs; i++)
                vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu,
                                 &vdpasim->iommu_lock);

        ret = iova_cache_get();
        if (ret)
                goto err_iommu;

        /* For simplicity we use an IOVA allocator with byte granularity */
        init_iova_domain(&vdpasim->iova, 1, 0);

        vdpasim->vdpa.dma_dev = dev;

        return vdpasim;

err_iommu:
        put_device(dev);
err_alloc:
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);

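/*
 * A minimal, hypothetical usage sketch (not part of this file): a
 * concrete simulator such as vdpa_sim_net.c would fill in a
 * vdpasim_dev_attr and call vdpasim_create() from its management
 * device's dev_add callback. The attribute values below are
 * illustrative only; see vdpa_sim.h for the full structure, and note
 * that my_sim_work is a made-up name for the device's work function.
 *
 *	static struct vdpasim_dev_attr attr = {
 *		.name			= "vdpasim_net",
 *		.id			= VIRTIO_ID_NET,
 *		.supported_features	= 1ULL << VIRTIO_F_ACCESS_PLATFORM,
 *		.nvqs			= 2,
 *		.config_size		= sizeof(struct virtio_net_config),
 *		.buffer_size		= PAGE_SIZE,
 *		.work_fn		= my_sim_work,
 *	};
 *
 *	struct vdpasim *sim = vdpasim_create(&attr);
 *	if (IS_ERR(sim))
 *		return PTR_ERR(sim);
 */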
static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
                                  u64 desc_area, u64 driver_area,
                                  u64 device_area)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->desc_addr = desc_area;
        vq->driver_addr = driver_area;
        vq->device_addr = device_area;

        return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        if (vq->ready)
                schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
                              struct vdpa_callback *cb)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->cb = cb->callback;
        vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        spin_lock(&vdpasim->lock);
        vq->ready = ready;
        if (vq->ready)
                vdpasim_queue_ready(vdpasim, idx);
        spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
                                const struct vdpa_vq_state *state)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        struct vringh *vrh = &vq->vring;

        spin_lock(&vdpasim->lock);
        vrh->last_avail_idx = state->split.avail_index;
        spin_unlock(&vdpasim->lock);

        return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
                                struct vdpa_vq_state *state)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        struct vringh *vrh = &vq->vring;

        state->split.avail_index = vrh->last_avail_idx;
        return 0;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
        return VDPASIM_QUEUE_ALIGN;
}

static u64 vdpasim_get_features(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->dev_attr.supported_features;
}

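/*
 * Feature negotiation. The simulator relies entirely on the IOTLB for
 * address translation, so VIRTIO_F_ACCESS_PLATFORM is mandatory; the
 * rest is masked against the device's supported feature set.
 */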
static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        /* DMA mapping must be done by driver */
        if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
                return -EINVAL;

        vdpasim->features = features & vdpasim->dev_attr.supported_features;

        return 0;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
                                  struct vdpa_callback *cb)
{
        /* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
        return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
        return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        u8 status;

        spin_lock(&vdpasim->lock);
        status = vdpasim->status;
        spin_unlock(&vdpasim->lock);

        return status;
}

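/*
 * Status write from the driver. As required by the virtio spec,
 * writing 0 triggers a device reset, performed here under the device
 * lock.
 */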
static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        spin_lock(&vdpasim->lock);
        vdpasim->status = status;
        if (status == 0)
                vdpasim_reset(vdpasim);
        spin_unlock(&vdpasim->lock);
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
                               void *buf, unsigned int len)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        /* Overflow-safe bounds check: offset + len could wrap around */
        if (offset > vdpasim->dev_attr.config_size ||
            len > vdpasim->dev_attr.config_size - offset)
                return;

        if (vdpasim->dev_attr.get_config)
                vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

        memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
                               const void *buf, unsigned int len)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        /* Overflow-safe bounds check, as in vdpasim_get_config() */
        if (offset > vdpasim->dev_attr.config_size ||
            len > vdpasim->dev_attr.config_size - offset)
                return;

        memcpy(vdpasim->config + offset, buf, len);

        if (vdpasim->dev_attr.set_config)
                vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
        struct vdpa_iova_range range = {
                .first = 0ULL,
                .last = ULLONG_MAX,
        };

        return range;
}

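/*
 * Batched mapping interface: replace the whole software IOTLB with the
 * contents of @iotlb in one pass, rolling back to an empty IOTLB on
 * failure. Used instead of dma_map()/dma_unmap() when batch_mapping is
 * enabled.
 */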
static int vdpasim_set_map(struct vdpa_device *vdpa,
                           struct vhost_iotlb *iotlb)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vhost_iotlb_map *map;
        u64 start = 0ULL, last = 0ULL - 1;
        int ret;

        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_reset(vdpasim->iommu);

        for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
             map = vhost_iotlb_itree_next(map, start, last)) {
                ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
                                            map->last, map->addr, map->perm);
                if (ret)
                        goto err;
        }
        spin_unlock(&vdpasim->iommu_lock);
        return 0;

err:
        vhost_iotlb_reset(vdpasim->iommu);
        spin_unlock(&vdpasim->iommu_lock);
        return ret;
}

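/*
 * Incremental mapping interface: add or remove a single IOTLB range at
 * a time.
 */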
static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
                           u64 pa, u32 perm)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        int ret;

        spin_lock(&vdpasim->iommu_lock);
        ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
                                    perm);
        spin_unlock(&vdpasim->iommu_lock);

        return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
        spin_unlock(&vdpasim->iommu_lock);

        return 0;
}

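/*
 * Release callback, run by the vDPA core when the device is freed:
 * stop the worker, clean up per-queue iov state, and release the IOVA
 * domain, IOTLB, buffer, queues and config space allocated in
 * vdpasim_create().
 */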
static void vdpasim_free(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        int i;

        cancel_work_sync(&vdpasim->work);

        for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
                vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
                vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
        }

        put_iova_domain(&vdpasim->iova);
        iova_cache_put();
        kvfree(vdpasim->buffer);
        if (vdpasim->iommu)
                vhost_iotlb_free(vdpasim->iommu);
        kfree(vdpasim->vqs);
        kfree(vdpasim->config);
}

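/*
 * The two config ops tables differ only in the mapping interface: the
 * default table exposes dma_map()/dma_unmap(), the batch table exposes
 * set_map(). vdpasim_create() picks one based on the batch_mapping
 * module parameter.
 */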
static const struct vdpa_config_ops vdpasim_config_ops = {
        .set_vq_address         = vdpasim_set_vq_address,
        .set_vq_num             = vdpasim_set_vq_num,
        .kick_vq                = vdpasim_kick_vq,
        .set_vq_cb              = vdpasim_set_vq_cb,
        .set_vq_ready           = vdpasim_set_vq_ready,
        .get_vq_ready           = vdpasim_get_vq_ready,
        .set_vq_state           = vdpasim_set_vq_state,
        .get_vq_state           = vdpasim_get_vq_state,
        .get_vq_align           = vdpasim_get_vq_align,
        .get_features           = vdpasim_get_features,
        .set_features           = vdpasim_set_features,
        .set_config_cb          = vdpasim_set_config_cb,
        .get_vq_num_max         = vdpasim_get_vq_num_max,
        .get_device_id          = vdpasim_get_device_id,
        .get_vendor_id          = vdpasim_get_vendor_id,
        .get_status             = vdpasim_get_status,
        .set_status             = vdpasim_set_status,
        .get_config_size        = vdpasim_get_config_size,
        .get_config             = vdpasim_get_config,
        .set_config             = vdpasim_set_config,
        .get_generation         = vdpasim_get_generation,
        .get_iova_range         = vdpasim_get_iova_range,
        .dma_map                = vdpasim_dma_map,
        .dma_unmap              = vdpasim_dma_unmap,
        .free                   = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
        .set_vq_address         = vdpasim_set_vq_address,
        .set_vq_num             = vdpasim_set_vq_num,
        .kick_vq                = vdpasim_kick_vq,
        .set_vq_cb              = vdpasim_set_vq_cb,
        .set_vq_ready           = vdpasim_set_vq_ready,
        .get_vq_ready           = vdpasim_get_vq_ready,
        .set_vq_state           = vdpasim_set_vq_state,
        .get_vq_state           = vdpasim_get_vq_state,
        .get_vq_align           = vdpasim_get_vq_align,
        .get_features           = vdpasim_get_features,
        .set_features           = vdpasim_set_features,
        .set_config_cb          = vdpasim_set_config_cb,
        .get_vq_num_max         = vdpasim_get_vq_num_max,
        .get_device_id          = vdpasim_get_device_id,
        .get_vendor_id          = vdpasim_get_vendor_id,
        .get_status             = vdpasim_get_status,
        .set_status             = vdpasim_set_status,
        .get_config_size        = vdpasim_get_config_size,
        .get_config             = vdpasim_get_config,
        .set_config             = vdpasim_set_config,
        .get_generation         = vdpasim_get_generation,
        .get_iova_range         = vdpasim_get_iova_range,
        .set_map                = vdpasim_set_map,
        .free                   = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);