1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 */
11 #include <linux/interrupt.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 #include <linux/sysfs.h>
15 #include "ifcvf_base.h"
17 #define DRIVER_AUTHOR "Intel Corporation"
18 #define IFCVF_DRIVER_NAME "ifcvf"
20 static irqreturn_t ifcvf_config_changed(int irq, void *arg)
22 struct ifcvf_hw *vf = arg;
24 if (vf->config_cb.callback)
25 return vf->config_cb.callback(vf->config_cb.private);
30 static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
32 struct vring_info *vring = arg;
34 if (vring->cb.callback)
35 return vring->cb.callback(vring->cb.private);
/* devres action: release the MSI-X vectors allocated for @data (a pci_dev). */
static void ifcvf_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}
45 static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
47 struct pci_dev *pdev = adapter->pdev;
48 struct ifcvf_hw *vf = &adapter->vf;
52 for (i = 0; i < queues; i++) {
53 devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
54 vf->vring[i].irq = -EINVAL;
57 devm_free_irq(&pdev->dev, vf->config_irq, vf);
58 ifcvf_free_irq_vectors(pdev);
61 static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
63 struct pci_dev *pdev = adapter->pdev;
64 struct ifcvf_hw *vf = &adapter->vf;
65 int vector, i, ret, irq;
67 ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
68 IFCVF_MAX_INTR, PCI_IRQ_MSIX);
70 IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
74 snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
77 vf->config_irq = pci_irq_vector(pdev, vector);
78 ret = devm_request_irq(&pdev->dev, vf->config_irq,
79 ifcvf_config_changed, 0,
80 vf->config_msix_name, vf);
82 IFCVF_ERR(pdev, "Failed to request config irq\n");
86 for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
87 snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
89 vector = i + IFCVF_MSI_QUEUE_OFF;
90 irq = pci_irq_vector(pdev, vector);
91 ret = devm_request_irq(&pdev->dev, irq,
92 ifcvf_intr_handler, 0,
93 vf->vring[i].msix_name,
97 "Failed to request irq for vq %d\n", i);
98 ifcvf_free_irq(adapter, i);
103 vf->vring[i].irq = irq;
109 static int ifcvf_start_datapath(void *private)
111 struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
115 vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
116 ret = ifcvf_start_hw(vf);
118 status = ifcvf_get_status(vf);
119 status |= VIRTIO_CONFIG_S_FAILED;
120 ifcvf_set_status(vf, status);
126 static int ifcvf_stop_datapath(void *private)
128 struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
131 for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
132 vf->vring[i].cb.callback = NULL;
139 static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
141 struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
144 for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
145 vf->vring[i].last_avail_idx = 0;
146 vf->vring[i].desc = 0;
147 vf->vring[i].avail = 0;
148 vf->vring[i].used = 0;
149 vf->vring[i].ready = 0;
150 vf->vring[i].cb.callback = NULL;
151 vf->vring[i].cb.private = NULL;
157 static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
159 return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
162 static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
164 struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
169 static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
171 struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
172 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
173 struct pci_dev *pdev = adapter->pdev;
177 switch (vf->dev_type) {
179 features = ifcvf_get_features(vf) & IFCVF_NET_SUPPORTED_FEATURES;
181 case VIRTIO_ID_BLOCK:
182 features = ifcvf_get_features(vf);
186 IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
192 static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
194 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
197 ret = ifcvf_verify_min_features(vf, features);
201 vf->req_features = features;
206 static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
208 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
210 return ifcvf_get_status(vf);
213 static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
215 struct ifcvf_adapter *adapter;
220 vf = vdpa_to_vf(vdpa_dev);
221 adapter = dev_get_drvdata(vdpa_dev->dev.parent);
222 status_old = ifcvf_get_status(vf);
224 if (status_old == status)
227 if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
228 !(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
229 ifcvf_stop_datapath(adapter);
230 ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2);
234 ifcvf_reset_vring(adapter);
238 if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
239 !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
240 ret = ifcvf_request_irq(adapter);
242 status = ifcvf_get_status(vf);
243 status |= VIRTIO_CONFIG_S_FAILED;
244 ifcvf_set_status(vf, status);
248 if (ifcvf_start_datapath(adapter) < 0)
249 IFCVF_ERR(adapter->pdev,
250 "Failed to set ifcvf vdpa status %u\n",
254 ifcvf_set_status(vf, status);
257 static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
259 return IFCVF_QUEUE_MAX;
262 static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
263 struct vdpa_vq_state *state)
265 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
267 state->split.avail_index = ifcvf_get_vq_state(vf, qid);
271 static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
272 const struct vdpa_vq_state *state)
274 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
276 return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
279 static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
280 struct vdpa_callback *cb)
282 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
284 vf->vring[qid].cb = *cb;
287 static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
290 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
292 vf->vring[qid].ready = ready;
295 static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
297 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
299 return vf->vring[qid].ready;
302 static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
305 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
307 vf->vring[qid].size = num;
310 static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
311 u64 desc_area, u64 driver_area,
314 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
316 vf->vring[qid].desc = desc_area;
317 vf->vring[qid].avail = driver_area;
318 vf->vring[qid].used = device_area;
323 static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
325 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
327 ifcvf_notify_queue(vf, qid);
330 static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
332 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
334 return ioread8(&vf->common_cfg->config_generation);
337 static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
339 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
344 static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
346 struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
347 struct pci_dev *pdev = adapter->pdev;
349 return pdev->subsystem_vendor;
352 static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
354 return IFCVF_QUEUE_ALIGNMENT;
357 static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
359 struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
360 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
361 struct pci_dev *pdev = adapter->pdev;
364 switch (vf->dev_type) {
366 size = sizeof(struct virtio_net_config);
368 case VIRTIO_ID_BLOCK:
369 size = sizeof(struct virtio_blk_config);
373 IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
379 static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
381 void *buf, unsigned int len)
383 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
385 WARN_ON(offset + len > sizeof(struct virtio_net_config));
386 ifcvf_read_net_config(vf, offset, buf, len);
389 static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
390 unsigned int offset, const void *buf,
393 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
395 WARN_ON(offset + len > sizeof(struct virtio_net_config));
396 ifcvf_write_net_config(vf, offset, buf, len);
399 static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
400 struct vdpa_callback *cb)
402 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
404 vf->config_cb.callback = cb->callback;
405 vf->config_cb.private = cb->private;
408 static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
411 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
413 return vf->vring[qid].irq;
416 static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
419 struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
420 struct vdpa_notification_area area;
422 area.addr = vf->vring[idx].notify_pa;
423 if (!vf->notify_off_multiplier)
424 area.size = PAGE_SIZE;
426 area.size = vf->notify_off_multiplier;
432 * IFCVF currently does't have on-chip IOMMU, so not
433 * implemented set_map()/dma_map()/dma_unmap()
435 static const struct vdpa_config_ops ifc_vdpa_ops = {
436 .get_features = ifcvf_vdpa_get_features,
437 .set_features = ifcvf_vdpa_set_features,
438 .get_status = ifcvf_vdpa_get_status,
439 .set_status = ifcvf_vdpa_set_status,
440 .get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
441 .get_vq_state = ifcvf_vdpa_get_vq_state,
442 .set_vq_state = ifcvf_vdpa_set_vq_state,
443 .set_vq_cb = ifcvf_vdpa_set_vq_cb,
444 .set_vq_ready = ifcvf_vdpa_set_vq_ready,
445 .get_vq_ready = ifcvf_vdpa_get_vq_ready,
446 .set_vq_num = ifcvf_vdpa_set_vq_num,
447 .set_vq_address = ifcvf_vdpa_set_vq_address,
448 .get_vq_irq = ifcvf_vdpa_get_vq_irq,
449 .kick_vq = ifcvf_vdpa_kick_vq,
450 .get_generation = ifcvf_vdpa_get_generation,
451 .get_device_id = ifcvf_vdpa_get_device_id,
452 .get_vendor_id = ifcvf_vdpa_get_vendor_id,
453 .get_vq_align = ifcvf_vdpa_get_vq_align,
454 .get_config_size = ifcvf_vdpa_get_config_size,
455 .get_config = ifcvf_vdpa_get_config,
456 .set_config = ifcvf_vdpa_set_config,
457 .set_config_cb = ifcvf_vdpa_set_config_cb,
458 .get_vq_notification = ifcvf_get_vq_notification,
461 static u32 get_dev_type(struct pci_dev *pdev)
465 /* This drirver drives both modern virtio devices and transitional
466 * devices in modern mode.
467 * vDPA requires feature bit VIRTIO_F_ACCESS_PLATFORM,
468 * so legacy devices and transitional devices in legacy
469 * mode will not work for vDPA, this driver will not
470 * drive devices with legacy interface.
473 if (pdev->device < 0x1040)
474 dev_type = pdev->subsystem_device;
476 dev_type = pdev->device - 0x1040;
481 static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
483 struct device *dev = &pdev->dev;
484 struct ifcvf_adapter *adapter;
488 ret = pcim_enable_device(pdev);
490 IFCVF_ERR(pdev, "Failed to enable device\n");
494 ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
497 IFCVF_ERR(pdev, "Failed to request MMIO region\n");
501 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
503 IFCVF_ERR(pdev, "No usable DMA configuration\n");
507 ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
510 "Failed for adding devres for freeing irq vectors\n");
514 adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
515 dev, &ifc_vdpa_ops, NULL);
516 if (IS_ERR(adapter)) {
517 IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
518 return PTR_ERR(adapter);
521 pci_set_master(pdev);
522 pci_set_drvdata(pdev, adapter);
525 vf->dev_type = get_dev_type(pdev);
526 vf->base = pcim_iomap_table(pdev);
528 adapter->pdev = pdev;
529 adapter->vdpa.dma_dev = &pdev->dev;
531 ret = ifcvf_init_hw(vf, pdev);
533 IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
537 for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
538 vf->vring[i].irq = -EINVAL;
540 vf->hw_features = ifcvf_get_hw_features(vf);
542 ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2);
544 IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
551 put_device(&adapter->vdpa.dev);
555 static void ifcvf_remove(struct pci_dev *pdev)
557 struct ifcvf_adapter *adapter = pci_get_drvdata(pdev);
559 vdpa_unregister_device(&adapter->vdpa);
562 static struct pci_device_id ifcvf_pci_ids[] = {
563 /* N3000 network device */
564 { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
567 N3000_SUBSYS_DEVICE_ID) },
568 /* C5000X-PL network device */
569 { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
573 /* C5000X-PL block device */
574 { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
575 VIRTIO_TRANS_ID_BLOCK,
581 MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);
583 static struct pci_driver ifcvf_driver = {
584 .name = IFCVF_DRIVER_NAME,
585 .id_table = ifcvf_pci_ids,
586 .probe = ifcvf_probe,
587 .remove = ifcvf_remove,
590 module_pci_driver(ifcvf_driver);
592 MODULE_LICENSE("GPL v2");