vDPA/ifcvf: deduce VIRTIO device ID during probe
drivers/vdpa/ifcvf/ifcvf_main.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR   "Intel Corporation"
#define IFCVF_DRIVER_NAME       "ifcvf"

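/* Top half for the config-change vector: invoke the registered vdpa config callback, if any */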
static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
        struct ifcvf_hw *vf = arg;

        if (vf->config_cb.callback)
                return vf->config_cb.callback(vf->config_cb.private);

        return IRQ_HANDLED;
}

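/* Top half for a vq vector: invoke the vring's registered vdpa callback, if any */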
static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
{
        struct vring_info *vring = arg;

        if (vring->cb.callback)
                return vring->cb.callback(vring->cb.private);

        return IRQ_HANDLED;
}

static void ifcvf_free_irq_vectors(void *data)
{
        pci_free_irq_vectors(data);
}

static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int i;

        for (i = 0; i < queues; i++) {
                devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
                vf->vring[i].irq = -EINVAL;
        }

        devm_free_irq(&pdev->dev, vf->config_irq, vf);
        ifcvf_free_irq_vectors(pdev);
}

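/*
 * Allocate exactly IFCVF_MAX_INTR MSI-X vectors: vector 0 serves
 * config-change interrupts, and vq i maps to vector
 * i + IFCVF_MSI_QUEUE_OFF. If requesting a vq irq fails, the irqs
 * requested so far are released.
 */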
static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int vector, i, ret, irq;

        ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
                                    IFCVF_MAX_INTR, PCI_IRQ_MSIX);
        if (ret < 0) {
                IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
                return ret;
        }

        snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config",
                 pci_name(pdev));
        vector = 0;
        vf->config_irq = pci_irq_vector(pdev, vector);
        ret = devm_request_irq(&pdev->dev, vf->config_irq,
                               ifcvf_config_changed, 0,
                               vf->config_msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request config irq\n");
                return ret;
        }

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
                snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d",
                         pci_name(pdev), i);
                vector = i + IFCVF_MSI_QUEUE_OFF;
                irq = pci_irq_vector(pdev, vector);
                ret = devm_request_irq(&pdev->dev, irq,
                                       ifcvf_intr_handler, 0,
                                       vf->vring[i].msix_name,
                                       &vf->vring[i]);
                if (ret) {
                        IFCVF_ERR(pdev,
                                  "Failed to request irq for vq %d\n", i);
                        ifcvf_free_irq(adapter, i);
                        return ret;
                }

                vf->vring[i].irq = irq;
        }

        return 0;
}

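/*
 * Start the hardware datapath for all queue pairs; on failure,
 * set VIRTIO_CONFIG_S_FAILED in the device status.
 */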
static int ifcvf_start_datapath(void *private)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
        u8 status;
        int ret;

        vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
        ret = ifcvf_start_hw(vf);
        if (ret < 0) {
                status = ifcvf_get_status(vf);
                status |= VIRTIO_CONFIG_S_FAILED;
                ifcvf_set_status(vf, status);
        }

        return ret;
}

static int ifcvf_stop_datapath(void *private)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
        int i;

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
                vf->vring[i].cb.callback = NULL;

        ifcvf_stop_hw(vf);

        return 0;
}

static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
        int i;

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
                vf->vring[i].last_avail_idx = 0;
                vf->vring[i].desc = 0;
                vf->vring[i].avail = 0;
                vf->vring[i].used = 0;
                vf->vring[i].ready = 0;
                vf->vring[i].cb.callback = NULL;
                vf->vring[i].cb.private = NULL;
        }

        ifcvf_reset(vf);
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
        return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

        return &adapter->vf;
}

static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        u64 features;

        features = ifcvf_get_features(vf) & IFCVF_SUPPORTED_FEATURES;

        return features;
}

static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        int ret;

        ret = ifcvf_verify_min_features(vf, features);
        if (ret)
                return ret;

        vf->req_features = features;

        return 0;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_get_status(vf);
}

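/*
 * Status transitions: setting DRIVER_OK requests irqs and starts the
 * datapath, clearing DRIVER_OK stops the datapath and frees the irqs,
 * and writing 0 resets the device and its vring state.
 */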
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
        u8 status_old;
        int ret;

        vf = vdpa_to_vf(vdpa_dev);
        adapter = dev_get_drvdata(vdpa_dev->dev.parent);
        status_old = ifcvf_get_status(vf);

        if (status_old == status)
                return;

        if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                ifcvf_stop_datapath(adapter);
                ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2);
        }

        if (status == 0) {
                ifcvf_reset_vring(adapter);
                return;
        }

        if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
                ret = ifcvf_request_irq(adapter);
                if (ret) {
                        status = ifcvf_get_status(vf);
                        status |= VIRTIO_CONFIG_S_FAILED;
                        ifcvf_set_status(vf, status);
                        return;
                }

                if (ifcvf_start_datapath(adapter) < 0)
                        IFCVF_ERR(adapter->pdev,
                                  "Failed to set ifcvf vdpa status %u\n",
                                  status);
        }

        ifcvf_set_status(vf, status);
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_MAX;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   struct vdpa_vq_state *state)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        state->avail_index = ifcvf_get_vq_state(vf, qid);
        return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   const struct vdpa_vq_state *state)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_set_vq_state(vf, qid, state->avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
                                 struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
                                    u16 qid, bool ready)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
                                  u32 num)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
                                     u64 desc_area, u64 driver_area,
                                     u64 device_area)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].desc = desc_area;
        vf->vring[qid].avail = driver_area;
        vf->vring[qid].used = device_area;

        return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->dev_type;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
        struct pci_dev *pdev = adapter->pdev;

        return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
        return sizeof(struct virtio_net_config);
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset,
                                  void *buf, unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        WARN_ON(offset + len > sizeof(struct virtio_net_config));
        ifcvf_read_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset, const void *buf,
                                  unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        WARN_ON(offset + len > sizeof(struct virtio_net_config));
        ifcvf_write_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
                                     struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->config_cb.callback = cb->callback;
        vf->config_cb.private = cb->private;
}

static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
                                 u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->vring[qid].irq;
}

/*
 * IFCVF currently doesn't have an on-chip IOMMU, so
 * set_map()/dma_map()/dma_unmap() are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
        .get_features   = ifcvf_vdpa_get_features,
        .set_features   = ifcvf_vdpa_set_features,
        .get_status     = ifcvf_vdpa_get_status,
        .set_status     = ifcvf_vdpa_set_status,
        .get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
        .get_vq_state   = ifcvf_vdpa_get_vq_state,
        .set_vq_state   = ifcvf_vdpa_set_vq_state,
        .set_vq_cb      = ifcvf_vdpa_set_vq_cb,
        .set_vq_ready   = ifcvf_vdpa_set_vq_ready,
        .get_vq_ready   = ifcvf_vdpa_get_vq_ready,
        .set_vq_num     = ifcvf_vdpa_set_vq_num,
        .set_vq_address = ifcvf_vdpa_set_vq_address,
        .get_vq_irq     = ifcvf_vdpa_get_vq_irq,
        .kick_vq        = ifcvf_vdpa_kick_vq,
        .get_generation = ifcvf_vdpa_get_generation,
        .get_device_id  = ifcvf_vdpa_get_device_id,
        .get_vendor_id  = ifcvf_vdpa_get_vendor_id,
        .get_vq_align   = ifcvf_vdpa_get_vq_align,
        .get_config_size = ifcvf_vdpa_get_config_size,
        .get_config     = ifcvf_vdpa_get_config,
        .set_config     = ifcvf_vdpa_set_config,
        .set_config_cb  = ifcvf_vdpa_set_config_cb,
};

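/*
 * Probe: map BARs 0, 2 and 4, set up 64-bit DMA, deduce the virtio
 * device ID from the PCI device/subsystem IDs, then register the VF
 * on the vdpa bus.
 */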
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct device *dev = &pdev->dev;
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
        int ret, i;

        ret = pcim_enable_device(pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to enable device\n");
                return ret;
        }

        ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
                                 IFCVF_DRIVER_NAME);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request MMIO region\n");
                return ret;
        }

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                IFCVF_ERR(pdev, "No usable DMA configuration\n");
                return ret;
        }

        ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
        if (ret) {
                IFCVF_ERR(pdev,
                          "Failed to add devres for freeing irq vectors\n");
                return ret;
        }

        adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
                                    dev, &ifc_vdpa_ops, NULL);
        if (!adapter) {
                IFCVF_ERR(pdev, "Failed to allocate vDPA structure\n");
                return -ENOMEM;
        }

        pci_set_master(pdev);
        pci_set_drvdata(pdev, adapter);

        vf = &adapter->vf;

        /* This driver drives both modern virtio devices and transitional
         * devices in modern mode.
         * vDPA requires the feature bit VIRTIO_F_ACCESS_PLATFORM, so
         * legacy devices and transitional devices in legacy mode will
         * not work for vDPA; this driver does not drive devices with
         * a legacy interface.
         */
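        /*
         * Per the virtio spec, a modern device's PCI device ID is
         * 0x1040 plus the virtio device ID, while a transitional
         * device (PCI device ID 0x1000..0x103f) carries the virtio
         * device ID in its PCI subsystem device ID.
         */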
        if (pdev->device < 0x1040)
                vf->dev_type = pdev->subsystem_device;
        else
                vf->dev_type = pdev->device - 0x1040;

        vf->base = pcim_iomap_table(pdev);

        adapter->pdev = pdev;
        adapter->vdpa.dma_dev = &pdev->dev;

        ret = ifcvf_init_hw(vf, pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
                goto err;
        }

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
                vf->vring[i].irq = -EINVAL;

        vf->hw_features = ifcvf_get_hw_features(vf);

        ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus\n");
                goto err;
        }

        return 0;

err:
        put_device(&adapter->vdpa.dev);
        return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
        struct ifcvf_adapter *adapter = pci_get_drvdata(pdev);

        vdpa_unregister_device(&adapter->vdpa);
}

static struct pci_device_id ifcvf_pci_ids[] = {
        { PCI_DEVICE_SUB(N3000_VENDOR_ID,
                         N3000_DEVICE_ID,
                         N3000_SUBSYS_VENDOR_ID,
                         N3000_SUBSYS_DEVICE_ID) },
        { PCI_DEVICE_SUB(C5000X_PL_VENDOR_ID,
                         C5000X_PL_DEVICE_ID,
                         C5000X_PL_SUBSYS_VENDOR_ID,
                         C5000X_PL_SUBSYS_DEVICE_ID) },

        { 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
        .name     = IFCVF_DRIVER_NAME,
        .id_table = ifcvf_pci_ids,
        .probe    = ifcvf_probe,
        .remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");