// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define VERSION_STRING "0.1"
#define DRIVER_AUTHOR "Intel Corporation"
#define IFCVF_DRIVER_NAME "ifcvf"

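/*
 * Per-virtqueue MSI-X interrupt handler: forward the hardware interrupt
 * to the callback the vDPA core registered for this vring, if any.
 */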
static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
{
        struct vring_info *vring = arg;

        if (vring->cb.callback)
                return vring->cb.callback(vring->cb.private);

        return IRQ_HANDLED;
}

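/*
 * Bring up the hardware datapath; on failure, latch VIRTIO_CONFIG_S_FAILED
 * into the device status so the error is visible to the driver.
 */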
static int ifcvf_start_datapath(void *private)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
        struct ifcvf_adapter *ifcvf;
        u8 status;
        int ret;

        ifcvf = vf_to_adapter(vf);
        vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
        ret = ifcvf_start_hw(vf);
        if (ret < 0) {
                status = ifcvf_get_status(vf);
                status |= VIRTIO_CONFIG_S_FAILED;
                ifcvf_set_status(vf, status);
        }

        return ret;
}

static int ifcvf_stop_datapath(void *private)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
        int i;

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
                vf->vring[i].cb.callback = NULL;

        ifcvf_stop_hw(vf);

        return 0;
}

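/*
 * Clear the per-vring software state so the device starts from a clean
 * slate the next time the datapath is configured.
 */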
static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
        int i;

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
                vf->vring[i].last_avail_idx = 0;
                vf->vring[i].desc = 0;
                vf->vring[i].avail = 0;
                vf->vring[i].used = 0;
                vf->vring[i].ready = 0;
                vf->vring[i].cb.callback = NULL;
                vf->vring[i].cb.private = NULL;
        }

        ifcvf_reset(vf);
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
        return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

        return &adapter->vf;
}

static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        u64 features;

        features = ifcvf_get_features(vf) & IFCVF_SUPPORTED_FEATURES;

        return features;
}

static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->req_features = features;

        return 0;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_get_status(vf);
}

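/*
 * A status of zero means the device is being reset: stop the datapath and
 * clear the vring state. VIRTIO_CONFIG_S_DRIVER_OK starts the datapath.
 */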
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;

        vf = vdpa_to_vf(vdpa_dev);
        adapter = dev_get_drvdata(vdpa_dev->dev.parent);

        if (status == 0) {
                ifcvf_stop_datapath(adapter);
                ifcvf_reset_vring(adapter);
                return;
        }

        if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                if (ifcvf_start_datapath(adapter) < 0)
                        IFCVF_ERR(adapter->pdev,
                                  "Failed to set ifcvf vdpa status %u\n",
                                  status);
        }

        ifcvf_set_status(vf, status);
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_MAX;
}

static u64 ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_get_vq_state(vf, qid);
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   u64 num)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_set_vq_state(vf, qid, num);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
                                 struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
                                    u16 qid, bool ready)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
                                  u32 num)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
                                     u64 desc_area, u64 driver_area,
                                     u64 device_area)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].desc = desc_area;
        vf->vring[qid].avail = driver_area;
        vf->vring[qid].used = device_area;

        return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
        return VIRTIO_ID_NET;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
        return IFCVF_SUBSYS_VENDOR_ID;
}

static u16 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_ALIGNMENT;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset,
                                  void *buf, unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        WARN_ON(offset + len > sizeof(struct virtio_net_config));
        ifcvf_read_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset, const void *buf,
                                  unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        WARN_ON(offset + len > sizeof(struct virtio_net_config));
        ifcvf_write_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
                                     struct vdpa_callback *cb)
{
        /* We don't support config interrupt */
}

/*
 * IFCVF currently doesn't have an on-chip IOMMU, so set_map(),
 * dma_map() and dma_unmap() are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
        .get_features = ifcvf_vdpa_get_features,
        .set_features = ifcvf_vdpa_set_features,
        .get_status = ifcvf_vdpa_get_status,
        .set_status = ifcvf_vdpa_set_status,
        .get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
        .get_vq_state = ifcvf_vdpa_get_vq_state,
        .set_vq_state = ifcvf_vdpa_set_vq_state,
        .set_vq_cb = ifcvf_vdpa_set_vq_cb,
        .set_vq_ready = ifcvf_vdpa_set_vq_ready,
        .get_vq_ready = ifcvf_vdpa_get_vq_ready,
        .set_vq_num = ifcvf_vdpa_set_vq_num,
        .set_vq_address = ifcvf_vdpa_set_vq_address,
        .kick_vq = ifcvf_vdpa_kick_vq,
        .get_generation = ifcvf_vdpa_get_generation,
        .get_device_id = ifcvf_vdpa_get_device_id,
        .get_vendor_id = ifcvf_vdpa_get_vendor_id,
        .get_vq_align = ifcvf_vdpa_get_vq_align,
        .get_config = ifcvf_vdpa_get_config,
        .set_config = ifcvf_vdpa_set_config,
        .set_config_cb = ifcvf_vdpa_set_config_cb,
};

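/*
 * Request one MSI-X interrupt per virtqueue; each vector is routed to
 * ifcvf_intr_handler() with the matching vring_info as its argument.
 */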
static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int vector, i, ret, irq;

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
                snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d",
                         pci_name(pdev), i);
                vector = i + IFCVF_MSI_QUEUE_OFF;
                irq = pci_irq_vector(pdev, vector);
                ret = devm_request_irq(&pdev->dev, irq,
                                       ifcvf_intr_handler, 0,
                                       vf->vring[i].msix_name,
                                       &vf->vring[i]);
                if (ret) {
                        IFCVF_ERR(pdev,
                                  "Failed to request irq for vq %d\n", i);
                        return ret;
                }
                vf->vring[i].irq = irq;
        }

        return 0;
}

static void ifcvf_free_irq_vectors(void *data)
{
        pci_free_irq_vectors(data);
}

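/*
 * PCI probe: map the device BARs, set up 64-bit DMA, allocate MSI-X
 * vectors and register the adapter with the vDPA bus.
 */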
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct device *dev = &pdev->dev;
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
        int ret;

        ret = pcim_enable_device(pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to enable device\n");
                return ret;
        }

        ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
                                 IFCVF_DRIVER_NAME);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request MMIO region\n");
                return ret;
        }

        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (ret) {
                IFCVF_ERR(pdev, "No usable DMA configuration\n");
                return ret;
        }

        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (ret) {
                IFCVF_ERR(pdev,
                          "No usable coherent DMA configuration\n");
                return ret;
        }

        ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
                                    IFCVF_MAX_INTR, PCI_IRQ_MSIX);
        if (ret < 0) {
                IFCVF_ERR(pdev, "Failed to alloc irq vectors\n");
                return ret;
        }

        ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
        if (ret) {
                IFCVF_ERR(pdev,
                          "Failed for adding devres for freeing irq vectors\n");
                return ret;
        }

        adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
                                    dev, &ifc_vdpa_ops);
        if (adapter == NULL) {
                IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
                return -ENOMEM;
        }

        pci_set_master(pdev);
        pci_set_drvdata(pdev, adapter);

        vf = &adapter->vf;
        vf->base = pcim_iomap_table(pdev);

        adapter->pdev = pdev;
        adapter->vdpa.dma_dev = &pdev->dev;

        ret = ifcvf_request_irq(adapter);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request MSI-X irq\n");
                goto err;
        }

        ret = ifcvf_init_hw(vf, pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
                goto err;
        }

        ret = vdpa_register_device(&adapter->vdpa);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
                goto err;
        }

        return 0;

err:
        put_device(&adapter->vdpa.dev);
        return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
        struct ifcvf_adapter *adapter = pci_get_drvdata(pdev);

        vdpa_unregister_device(&adapter->vdpa);
}

static struct pci_device_id ifcvf_pci_ids[] = {
        { PCI_DEVICE_SUB(IFCVF_VENDOR_ID,
                         IFCVF_DEVICE_ID,
                         IFCVF_SUBSYS_VENDOR_ID,
                         IFCVF_SUBSYS_DEVICE_ID) },
        { 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
        .name     = IFCVF_DRIVER_NAME,
        .id_table = ifcvf_pci_ids,
        .probe    = ifcvf_probe,
        .remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");
MODULE_VERSION(VERSION_STRING);