vDPA/ifcvf: implement management netlink framework for ifcvf
linux-2.6-microblaze.git: drivers/vdpa/ifcvf/ifcvf_main.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR   "Intel Corporation"
#define IFCVF_DRIVER_NAME       "ifcvf"

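/*
 * IRQ handlers: relay the MSI-X interrupt to the callback installed by
 * the vDPA bus driver via set_config_cb()/set_vq_cb(); if no callback
 * is registered yet, the interrupt is simply acknowledged.
 */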
static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
        struct ifcvf_hw *vf = arg;

        if (vf->config_cb.callback)
                return vf->config_cb.callback(vf->config_cb.private);

        return IRQ_HANDLED;
}

static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
{
        struct vring_info *vring = arg;

        if (vring->cb.callback)
                return vring->cb.callback(vring->cb.private);

        return IRQ_HANDLED;
}

static void ifcvf_free_irq_vectors(void *data)
{
        pci_free_irq_vectors(data);
}

static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int i;

        for (i = 0; i < queues; i++) {
                devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
                vf->vring[i].irq = -EINVAL;
        }

        devm_free_irq(&pdev->dev, vf->config_irq, vf);
        ifcvf_free_irq_vectors(pdev);
}

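/*
 * Allocate all IFCVF_MAX_INTR MSI-X vectors up front: vector 0 serves
 * the config-change interrupt and virtqueue i uses vector
 * i + IFCVF_MSI_QUEUE_OFF. On a per-vq request failure, the vqs set up
 * so far are torn down via ifcvf_free_irq().
 */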
static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int vector, i, ret, irq;

        ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
                                    IFCVF_MAX_INTR, PCI_IRQ_MSIX);
        if (ret < 0) {
                IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
                return ret;
        }

        snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config",
                 pci_name(pdev));
        vector = 0;
        vf->config_irq = pci_irq_vector(pdev, vector);
        ret = devm_request_irq(&pdev->dev, vf->config_irq,
                               ifcvf_config_changed, 0,
                               vf->config_msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request config irq\n");
                return ret;
        }

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
                snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d",
                         pci_name(pdev), i);
                vector = i + IFCVF_MSI_QUEUE_OFF;
                irq = pci_irq_vector(pdev, vector);
                ret = devm_request_irq(&pdev->dev, irq,
                                       ifcvf_intr_handler, 0,
                                       vf->vring[i].msix_name,
                                       &vf->vring[i]);
                if (ret) {
                        IFCVF_ERR(pdev,
                                  "Failed to request irq for vq %d\n", i);
                        ifcvf_free_irq(adapter, i);
                        return ret;
                }

                vf->vring[i].irq = irq;
        }

        return 0;
}

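/*
 * Bring up the hardware datapath; on failure, VIRTIO_CONFIG_S_FAILED is
 * raised in the device status so the error is visible to the driver side.
 */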
static int ifcvf_start_datapath(void *private)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
        u8 status;
        int ret;

        vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
        ret = ifcvf_start_hw(vf);
        if (ret < 0) {
                status = ifcvf_get_status(vf);
                status |= VIRTIO_CONFIG_S_FAILED;
                ifcvf_set_status(vf, status);
        }

        return ret;
}

static int ifcvf_stop_datapath(void *private)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
        int i;

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
                vf->vring[i].cb.callback = NULL;

        ifcvf_stop_hw(vf);

        return 0;
}

static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
        int i;

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
                vf->vring[i].last_avail_idx = 0;
                vf->vring[i].desc = 0;
                vf->vring[i].avail = 0;
                vf->vring[i].used = 0;
                vf->vring[i].ready = 0;
                vf->vring[i].cb.callback = NULL;
                vf->vring[i].cb.private = NULL;
        }

        ifcvf_reset(vf);
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
        return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

        return &adapter->vf;
}

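/*
 * For a net device, only the bits in IFCVF_NET_SUPPORTED_FEATURES are
 * advertised; a block device passes the hardware feature bits through
 * unfiltered.
 */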
static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        struct pci_dev *pdev = adapter->pdev;
        u64 features;

        switch (vf->dev_type) {
        case VIRTIO_ID_NET:
                features = ifcvf_get_features(vf) & IFCVF_NET_SUPPORTED_FEATURES;
                break;
        case VIRTIO_ID_BLOCK:
                features = ifcvf_get_features(vf);
                break;
        default:
                features = 0;
                IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
        }

        return features;
}

static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        int ret;

        ret = ifcvf_verify_min_features(vf, features);
        if (ret)
                return ret;

        vf->req_features = features;

        return 0;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_get_status(vf);
}

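/*
 * Status writes drive the datapath state machine:
 * - clearing DRIVER_OK stops the datapath and frees the irqs;
 * - writing 0 is a reset, restoring all vrings to their defaults;
 * - newly setting DRIVER_OK requests irqs and starts the datapath,
 *   flagging VIRTIO_CONFIG_S_FAILED on error.
 */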
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
        u8 status_old;
        int ret;

        vf = vdpa_to_vf(vdpa_dev);
        adapter = vdpa_to_adapter(vdpa_dev);
        status_old = ifcvf_get_status(vf);

        if (status_old == status)
                return;

        if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                ifcvf_stop_datapath(adapter);
                ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2);
        }

        if (status == 0) {
                ifcvf_reset_vring(adapter);
                return;
        }

        if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
                ret = ifcvf_request_irq(adapter);
                if (ret) {
                        status = ifcvf_get_status(vf);
                        status |= VIRTIO_CONFIG_S_FAILED;
                        ifcvf_set_status(vf, status);
                        return;
                }

                if (ifcvf_start_datapath(adapter) < 0)
                        IFCVF_ERR(adapter->pdev,
                                  "Failed to set ifcvf vdpa status %u\n",
                                  status);
        }

        ifcvf_set_status(vf, status);
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_MAX;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   struct vdpa_vq_state *state)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        state->split.avail_index = ifcvf_get_vq_state(vf, qid);
        return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   const struct vdpa_vq_state *state)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
                                 struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
                                    u16 qid, bool ready)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
                                  u32 num)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
                                     u64 desc_area, u64 driver_area,
                                     u64 device_area)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].desc = desc_area;
        vf->vring[qid].avail = driver_area;
        vf->vring[qid].used = device_area;

        return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->dev_type;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
        struct pci_dev *pdev = adapter->pdev;

        return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        struct pci_dev *pdev = adapter->pdev;
        size_t size;

        switch (vf->dev_type) {
        case VIRTIO_ID_NET:
                size = sizeof(struct virtio_net_config);
                break;
        case VIRTIO_ID_BLOCK:
                size = sizeof(struct virtio_blk_config);
                break;
        default:
                size = 0;
                IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
        }

        return size;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset,
                                  void *buf, unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        WARN_ON(offset + len > sizeof(struct virtio_net_config));
        ifcvf_read_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset, const void *buf,
                                  unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        WARN_ON(offset + len > sizeof(struct virtio_net_config));
        ifcvf_write_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
                                     struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->config_cb.callback = cb->callback;
        vf->config_cb.private = cb->private;
}

static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
                                 u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->vring[qid].irq;
}

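/*
 * Expose the physical address of a vq's notify register so the bus
 * driver (e.g. vhost_vdpa) can map the doorbell directly. A zero
 * notify_off_multiplier means the queues share one notify register,
 * so a full page is reported in that case.
 */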
static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
                                                               u16 idx)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        struct vdpa_notification_area area;

        area.addr = vf->vring[idx].notify_pa;
        if (!vf->notify_off_multiplier)
                area.size = PAGE_SIZE;
        else
                area.size = vf->notify_off_multiplier;

        return area;
}

/*
 * IFCVF currently doesn't have an on-chip IOMMU, so
 * set_map()/dma_map()/dma_unmap() are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
        .get_features   = ifcvf_vdpa_get_features,
        .set_features   = ifcvf_vdpa_set_features,
        .get_status     = ifcvf_vdpa_get_status,
        .set_status     = ifcvf_vdpa_set_status,
        .get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
        .get_vq_state   = ifcvf_vdpa_get_vq_state,
        .set_vq_state   = ifcvf_vdpa_set_vq_state,
        .set_vq_cb      = ifcvf_vdpa_set_vq_cb,
        .set_vq_ready   = ifcvf_vdpa_set_vq_ready,
        .get_vq_ready   = ifcvf_vdpa_get_vq_ready,
        .set_vq_num     = ifcvf_vdpa_set_vq_num,
        .set_vq_address = ifcvf_vdpa_set_vq_address,
        .get_vq_irq     = ifcvf_vdpa_get_vq_irq,
        .kick_vq        = ifcvf_vdpa_kick_vq,
        .get_generation = ifcvf_vdpa_get_generation,
        .get_device_id  = ifcvf_vdpa_get_device_id,
        .get_vendor_id  = ifcvf_vdpa_get_vendor_id,
        .get_vq_align   = ifcvf_vdpa_get_vq_align,
        .get_config_size        = ifcvf_vdpa_get_config_size,
        .get_config     = ifcvf_vdpa_get_config,
        .set_config     = ifcvf_vdpa_set_config,
        .set_config_cb  = ifcvf_vdpa_set_config_cb,
        .get_vq_notification = ifcvf_get_vq_notification,
};

static struct virtio_device_id id_table_net[] = {
        {VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
        {0},
};

static struct virtio_device_id id_table_blk[] = {
        {VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
        {0},
};

static u32 get_dev_type(struct pci_dev *pdev)
{
        u32 dev_type;

        /* This driver drives both modern virtio devices and transitional
         * devices in modern mode.
         * vDPA requires feature bit VIRTIO_F_ACCESS_PLATFORM,
         * so legacy devices and transitional devices in legacy
         * mode will not work for vDPA; this driver will not
         * drive devices with a legacy interface.
         *
         * Per the virtio spec, a modern device uses PCI device ID
         * 0x1040 + the virtio device ID, while a transitional device
         * (ID below 0x1040) carries the virtio device ID in its
         * subsystem device ID.
         */

        if (pdev->device < 0x1040)
                dev_type = pdev->subsystem_device;
        else
                dev_type = pdev->device - 0x1040;

        return dev_type;
}

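/*
 * dev_add callback for "vdpa dev add" over the vDPA management netlink
 * interface. A VF backs exactly one vDPA device, so a second add on the
 * same management device is rejected with -EOPNOTSUPP.
 */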
static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
        struct ifcvf_adapter *adapter;
        struct pci_dev *pdev;
        struct ifcvf_hw *vf;
        struct device *dev;
        int ret, i;

        ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
        if (ifcvf_mgmt_dev->adapter)
                return -EOPNOTSUPP;

        pdev = ifcvf_mgmt_dev->pdev;
        dev = &pdev->dev;
        adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
                                    dev, &ifc_vdpa_ops, name);
        if (IS_ERR(adapter)) {
                IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
                return PTR_ERR(adapter);
        }

        ifcvf_mgmt_dev->adapter = adapter;
        pci_set_drvdata(pdev, ifcvf_mgmt_dev);

        vf = &adapter->vf;
        vf->dev_type = get_dev_type(pdev);
        vf->base = pcim_iomap_table(pdev);

        adapter->pdev = pdev;
        adapter->vdpa.dma_dev = &pdev->dev;

        ret = ifcvf_init_hw(vf, pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
                goto err;
        }

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
                vf->vring[i].irq = -EINVAL;

        vf->hw_features = ifcvf_get_hw_features(vf);

        adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
        ret = _vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to register to vDPA bus");
                goto err;
        }

        return 0;

err:
        put_device(&adapter->vdpa.dev);
        return ret;
}

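/* dev_del callback: unregister the vDPA device so the VF can be re-added. */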
static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

        ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
        _vdpa_unregister_device(dev);
        ifcvf_mgmt_dev->adapter = NULL;
}

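/*
 * These ops back the management netlink commands. With the iproute2
 * "vdpa" tool, a device would typically be created and destroyed with
 * something like the following (the BDF below is only an example):
 *
 *   vdpa dev add name vdpa0 mgmtdev pci/0000:01:00.0
 *   vdpa dev del vdpa0
 */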
static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
        .dev_add = ifcvf_vdpa_dev_add,
        .dev_del = ifcvf_vdpa_dev_del
};

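/*
 * PCI probe: map the BARs, set the DMA mask and devres-based vector
 * cleanup, then register the vDPA management device. The vDPA device
 * itself is only instantiated later through ifcvf_vdpa_dev_add().
 */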
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
        struct device *dev = &pdev->dev;
        u32 dev_type;
        int ret;

        ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
        if (!ifcvf_mgmt_dev) {
                IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
                return -ENOMEM;
        }

        dev_type = get_dev_type(pdev);
        switch (dev_type) {
        case VIRTIO_ID_NET:
                ifcvf_mgmt_dev->mdev.id_table = id_table_net;
                break;
        case VIRTIO_ID_BLOCK:
                ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
                break;
        default:
                IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
                ret = -EOPNOTSUPP;
                goto err;
        }

        ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
        ifcvf_mgmt_dev->mdev.device = dev;
        ifcvf_mgmt_dev->pdev = pdev;

        ret = pcim_enable_device(pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to enable device\n");
                goto err;
        }

        ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
                                 IFCVF_DRIVER_NAME);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request MMIO region\n");
                goto err;
        }

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                IFCVF_ERR(pdev, "No usable DMA configuration\n");
                goto err;
        }

        ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
        if (ret) {
                IFCVF_ERR(pdev,
                          "Failed to add devres for freeing irq vectors\n");
                goto err;
        }

        pci_set_master(pdev);

        ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
        if (ret) {
                IFCVF_ERR(pdev,
                          "Failed to initialize the management interfaces\n");
                goto err;
        }

        return 0;

err:
        kfree(ifcvf_mgmt_dev);
        return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

        ifcvf_mgmt_dev = pci_get_drvdata(pdev);
        vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
        kfree(ifcvf_mgmt_dev);
}

static struct pci_device_id ifcvf_pci_ids[] = {
        /* N3000 network device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         N3000_DEVICE_ID,
                         PCI_VENDOR_ID_INTEL,
                         N3000_SUBSYS_DEVICE_ID) },
        /* C5000X-PL network device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         VIRTIO_TRANS_ID_NET,
                         PCI_VENDOR_ID_INTEL,
                         VIRTIO_ID_NET) },
        /* C5000X-PL block device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         VIRTIO_TRANS_ID_BLOCK,
                         PCI_VENDOR_ID_INTEL,
                         VIRTIO_ID_BLOCK) },

        { 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
        .name     = IFCVF_DRIVER_NAME,
        .id_table = ifcvf_pci_ids,
        .probe    = ifcvf_probe,
        .remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");