drivers/vdpa/ifcvf/ifcvf_main.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR   "Intel Corporation"
#define IFCVF_DRIVER_NAME       "ifcvf"

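/*
 * MSI-X handler for the device config-change interrupt: dispatch to
 * the callback registered through set_config_cb(), if any.
 */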
static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
	struct ifcvf_hw *vf = arg;

	if (vf->config_cb.callback)
		return vf->config_cb.callback(vf->config_cb.private);

	return IRQ_HANDLED;
}

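/*
 * Per-virtqueue MSI-X handler: dispatch to the vq callback that the
 * vDPA bus driver registered through set_vq_cb(), if any.
 */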
static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
{
	struct vring_info *vring = arg;

	if (vring->cb.callback)
		return vring->cb.callback(vring->cb.private);

	return IRQ_HANDLED;
}

static void ifcvf_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}

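/*
 * Free the interrupts of the first @queues virtqueues plus the config
 * interrupt, then release the MSI-X vectors. Passing a partial count
 * lets the request path unwind only the vq irqs it already set up.
 */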
static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int i;

	for (i = 0; i < queues; i++) {
		devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
		vf->vring[i].irq = -EINVAL;
	}

	devm_free_irq(&pdev->dev, vf->config_irq, vf);
	ifcvf_free_irq_vectors(pdev);
}

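/*
 * MSI-X vector layout: vector 0 carries the config-change interrupt,
 * virtqueue i uses vector i + IFCVF_MSI_QUEUE_OFF.
 */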
static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct ifcvf_hw *vf = &adapter->vf;
	int vector, i, ret, irq;

	ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
				    IFCVF_MAX_INTR, PCI_IRQ_MSIX);
	if (ret < 0) {
		IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
		return ret;
	}

	/* No trailing '\n' in irq names: they end up in /proc/interrupts. */
	snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config",
		 pci_name(pdev));
	vector = 0;
	vf->config_irq = pci_irq_vector(pdev, vector);
	ret = devm_request_irq(&pdev->dev, vf->config_irq,
			       ifcvf_config_changed, 0,
			       vf->config_msix_name, vf);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request config irq\n");
		return ret;
	}

	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
		snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d",
			 pci_name(pdev), i);
		vector = i + IFCVF_MSI_QUEUE_OFF;
		irq = pci_irq_vector(pdev, vector);
		ret = devm_request_irq(&pdev->dev, irq,
				       ifcvf_intr_handler, 0,
				       vf->vring[i].msix_name,
				       &vf->vring[i]);
		if (ret) {
			IFCVF_ERR(pdev,
				  "Failed to request irq for vq %d\n", i);
			ifcvf_free_irq(adapter, i);

			return ret;
		}

		vf->vring[i].irq = irq;
	}

	return 0;
}

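/*
 * Start the HW datapath; on failure, latch VIRTIO_CONFIG_S_FAILED into
 * the device status so the failure is visible through get_status().
 */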
static int ifcvf_start_datapath(void *private)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
	u8 status;
	int ret;

	vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
	ret = ifcvf_start_hw(vf);
	if (ret < 0) {
		status = ifcvf_get_status(vf);
		status |= VIRTIO_CONFIG_S_FAILED;
		ifcvf_set_status(vf, status);
	}

	return ret;
}

static int ifcvf_stop_datapath(void *private)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
	int i;

	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
		vf->vring[i].cb.callback = NULL;

	ifcvf_stop_hw(vf);

	return 0;
}

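/* Clear all software vring state and callbacks, then reset the device. */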
static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
	struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
	int i;

	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
		vf->vring[i].last_avail_idx = 0;
		vf->vring[i].desc = 0;
		vf->vring[i].avail = 0;
		vf->vring[i].used = 0;
		vf->vring[i].ready = 0;
		vf->vring[i].cb.callback = NULL;
		vf->vring[i].cb.private = NULL;
	}

	ifcvf_reset(vf);
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

	return &adapter->vf;
}

static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;
	u64 features;

	switch (vf->dev_type) {
	case VIRTIO_ID_NET:
		features = ifcvf_get_features(vf) & IFCVF_NET_SUPPORTED_FEATURES;
		break;
	case VIRTIO_ID_BLOCK:
		features = ifcvf_get_features(vf);
		break;
	default:
		features = 0;
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
	}

	return features;
}

static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	int ret;

	ret = ifcvf_verify_min_features(vf, features);
	if (ret)
		return ret;

	vf->req_features = features;

	return 0;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_status(vf);
}

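/*
 * DRIVER_OK transitions drive the datapath: clearing DRIVER_OK stops
 * the datapath and frees the irqs, setting it requests the irqs and
 * starts the datapath. Writing 0 resets the device and vring state.
 */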
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	u8 status_old;
	int ret;

	vf = vdpa_to_vf(vdpa_dev);
	adapter = dev_get_drvdata(vdpa_dev->dev.parent);
	status_old = ifcvf_get_status(vf);

	if (status_old == status)
		return;

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		ifcvf_stop_datapath(adapter);
		ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2);
	}

	if (status == 0) {
		ifcvf_reset_vring(adapter);
		return;
	}

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
		ret = ifcvf_request_irq(adapter);
		if (ret) {
			status = ifcvf_get_status(vf);
			status |= VIRTIO_CONFIG_S_FAILED;
			ifcvf_set_status(vf, status);
			return;
		}

		if (ifcvf_start_datapath(adapter) < 0)
			IFCVF_ERR(adapter->pdev,
				  "Failed to set ifcvf vdpa status %u\n",
				  status);
	}

	ifcvf_set_status(vf, status);
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_MAX;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	state->split.avail_index = ifcvf_get_vq_state(vf, qid);
	return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   const struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
				    u16 qid, bool ready)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
				  u32 num)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				     u64 desc_area, u64 driver_area,
				     u64 device_area)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].desc = desc_area;
	vf->vring[qid].avail = driver_area;
	vf->vring[qid].used = device_area;

	return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->dev_type;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;

	return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct pci_dev *pdev = adapter->pdev;
	size_t size;

	switch (vf->dev_type) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	case VIRTIO_ID_BLOCK:
		size = sizeof(struct virtio_blk_config);
		break;
	default:
		size = 0;
		IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
	}

	return size;
}

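/*
 * Note: the bounds checks below assume a net device config layout
 * (virtio_net_config), although this driver also binds block devices.
 */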
static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset,
				  void *buf, unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	WARN_ON(offset + len > sizeof(struct virtio_net_config));
	ifcvf_read_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset, const void *buf,
				  unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	WARN_ON(offset + len > sizeof(struct virtio_net_config));
	ifcvf_write_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				     struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->config_cb.callback = cb->callback;
	vf->config_cb.private = cb->private;
}

static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
				 u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->vring[qid].irq;
}

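/*
 * Expose the vq doorbell for direct mapping. A zero
 * notify_off_multiplier means all queues share one notify register,
 * so fall back to mapping a whole page.
 */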
static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
							       u16 idx)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
	struct vdpa_notification_area area;

	area.addr = vf->vring[idx].notify_pa;
	if (!vf->notify_off_multiplier)
		area.size = PAGE_SIZE;
	else
		area.size = vf->notify_off_multiplier;

	return area;
}

/*
 * IFCVF currently doesn't have an on-chip IOMMU, so it does not
 * implement set_map()/dma_map()/dma_unmap().
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
	.get_features   = ifcvf_vdpa_get_features,
	.set_features   = ifcvf_vdpa_set_features,
	.get_status     = ifcvf_vdpa_get_status,
	.set_status     = ifcvf_vdpa_set_status,
	.get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
	.get_vq_state   = ifcvf_vdpa_get_vq_state,
	.set_vq_state   = ifcvf_vdpa_set_vq_state,
	.set_vq_cb      = ifcvf_vdpa_set_vq_cb,
	.set_vq_ready   = ifcvf_vdpa_set_vq_ready,
	.get_vq_ready   = ifcvf_vdpa_get_vq_ready,
	.set_vq_num     = ifcvf_vdpa_set_vq_num,
	.set_vq_address = ifcvf_vdpa_set_vq_address,
	.get_vq_irq     = ifcvf_vdpa_get_vq_irq,
	.kick_vq        = ifcvf_vdpa_kick_vq,
	.get_generation = ifcvf_vdpa_get_generation,
	.get_device_id  = ifcvf_vdpa_get_device_id,
	.get_vendor_id  = ifcvf_vdpa_get_vendor_id,
	.get_vq_align   = ifcvf_vdpa_get_vq_align,
	.get_config_size     = ifcvf_vdpa_get_config_size,
	.get_config     = ifcvf_vdpa_get_config,
	.set_config     = ifcvf_vdpa_set_config,
	.set_config_cb  = ifcvf_vdpa_set_config_cb,
	.get_vq_notification = ifcvf_get_vq_notification,
};

static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to enable device\n");
		return ret;
	}

	ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
				 IFCVF_DRIVER_NAME);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request MMIO region\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		IFCVF_ERR(pdev, "No usable DMA configuration\n");
		return ret;
	}

	ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed to add devres for freeing irq vectors\n");
		return ret;
	}

	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
				    dev, &ifc_vdpa_ops, NULL);
	if (IS_ERR(adapter)) {
		IFCVF_ERR(pdev, "Failed to allocate vDPA structure\n");
		return PTR_ERR(adapter);
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, adapter);

	vf = &adapter->vf;

	/* This driver drives both modern virtio devices and transitional
	 * devices in modern mode.
	 * vDPA requires the feature bit VIRTIO_F_ACCESS_PLATFORM, so
	 * legacy devices and transitional devices in legacy mode will
	 * not work for vDPA; this driver does not drive devices with a
	 * legacy interface.
	 */
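	/*
	 * Modern devices use PCI device ID 0x1040 + virtio device ID,
	 * while transitional devices carry the virtio device ID in the
	 * subsystem device ID.
	 */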
	if (pdev->device < 0x1040)
		vf->dev_type = pdev->subsystem_device;
	else
		vf->dev_type = pdev->device - 0x1040;

	vf->base = pcim_iomap_table(pdev);

	adapter->pdev = pdev;
	adapter->vdpa.dma_dev = &pdev->dev;

	ret = ifcvf_init_hw(vf, pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
		goto err;
	}

	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
		vf->vring[i].irq = -EINVAL;

	vf->hw_features = ifcvf_get_hw_features(vf);

	ret = vdpa_register_device(&adapter->vdpa, IFCVF_MAX_QUEUE_PAIRS * 2);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus\n");
		goto err;
	}

	return 0;

err:
	put_device(&adapter->vdpa.dev);
	return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
	struct ifcvf_adapter *adapter = pci_get_drvdata(pdev);

	vdpa_unregister_device(&adapter->vdpa);
}

static struct pci_device_id ifcvf_pci_ids[] = {
	/* N3000 network device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 N3000_DEVICE_ID,
			 PCI_VENDOR_ID_INTEL,
			 N3000_SUBSYS_DEVICE_ID) },
	/* C5000X-PL network device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_NET,
			 PCI_VENDOR_ID_INTEL,
			 VIRTIO_ID_NET) },
	/* C5000X-PL block device */
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
			 VIRTIO_TRANS_ID_BLOCK,
			 PCI_VENDOR_ID_INTEL,
			 VIRTIO_ID_BLOCK) },

	{ 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
	.name     = IFCVF_DRIVER_NAME,
	.id_table = ifcvf_pci_ids,
	.probe    = ifcvf_probe,
	.remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL v2");