drivers/vdpa/ifcvf/ifcvf_main.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define DRIVER_AUTHOR   "Intel Corporation"
#define IFCVF_DRIVER_NAME       "ifcvf"

static irqreturn_t ifcvf_config_changed(int irq, void *arg)
{
        struct ifcvf_hw *vf = arg;

        if (vf->config_cb.callback)
                return vf->config_cb.callback(vf->config_cb.private);

        return IRQ_HANDLED;
}

static irqreturn_t ifcvf_vq_intr_handler(int irq, void *arg)
{
        struct vring_info *vring = arg;

        if (vring->cb.callback)
                return vring->cb.callback(vring->cb.private);

        return IRQ_HANDLED;
}

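/* Interrupt handler used when all virtqueues share one MSI-X vector:
 * walk every vring and invoke its callback, if one is registered.
 */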
static irqreturn_t ifcvf_vqs_reused_intr_handler(int irq, void *arg)
{
        struct ifcvf_hw *vf = arg;
        struct vring_info *vring;
        int i;

        for (i = 0; i < vf->nr_vring; i++) {
                vring = &vf->vring[i];
                if (vring->cb.callback)
                        vring->cb.callback(vring->cb.private);
        }

        return IRQ_HANDLED;
}

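/* Interrupt handler used when the whole device shares one MSI-X vector:
 * read the ISR status byte to detect a config change, then service all
 * virtqueues through the reused-vector handler above.
 */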
static irqreturn_t ifcvf_dev_intr_handler(int irq, void *arg)
{
        struct ifcvf_hw *vf = arg;
        u8 isr;

        isr = vp_ioread8(vf->isr);
        if (isr & VIRTIO_PCI_ISR_CONFIG)
                ifcvf_config_changed(irq, arg);

        return ifcvf_vqs_reused_intr_handler(irq, arg);
}

static void ifcvf_free_irq_vectors(void *data)
{
        pci_free_irq_vectors(data);
}

static void ifcvf_free_per_vq_irq(struct ifcvf_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int i;

        for (i = 0; i < vf->nr_vring; i++) {
                if (vf->vring[i].irq != -EINVAL) {
                        devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
                        vf->vring[i].irq = -EINVAL;
                }
        }
}

static void ifcvf_free_vqs_reused_irq(struct ifcvf_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;

        if (vf->vqs_reused_irq != -EINVAL) {
                devm_free_irq(&pdev->dev, vf->vqs_reused_irq, vf);
                vf->vqs_reused_irq = -EINVAL;
        }
}

static void ifcvf_free_vq_irq(struct ifcvf_adapter *adapter)
{
        struct ifcvf_hw *vf = &adapter->vf;

        if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
                ifcvf_free_per_vq_irq(adapter);
        else
                ifcvf_free_vqs_reused_irq(adapter);
}

static void ifcvf_free_config_irq(struct ifcvf_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;

        if (vf->config_irq == -EINVAL)
                return;

        /* If the irq is shared by all vqs and the config interrupt,
         * it was already freed in ifcvf_free_vq_irq, so we only need
         * to free the config irq when msix_vector_status != MSIX_VECTOR_DEV_SHARED.
         */
        if (vf->msix_vector_status != MSIX_VECTOR_DEV_SHARED) {
                devm_free_irq(&pdev->dev, vf->config_irq, vf);
                vf->config_irq = -EINVAL;
        }
}

static void ifcvf_free_irq(struct ifcvf_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;

        ifcvf_free_vq_irq(adapter);
        ifcvf_free_config_irq(adapter);
        ifcvf_free_irq_vectors(pdev);
}

/* ifcvf MSIX vectors allocator, this helper tries to allocate
 * vectors for all virtqueues and the config interrupt.
 * It returns the number of allocated vectors, or a negative
 * value on failure.
 */
static int ifcvf_alloc_vectors(struct ifcvf_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int max_intr, ret;

        /* all queues and the config interrupt */
        max_intr = vf->nr_vring + 1;
        ret = pci_alloc_irq_vectors(pdev, 1, max_intr, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);

        if (ret < 0) {
                IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
                return ret;
        }

        if (ret < max_intr)
                IFCVF_INFO(pdev,
                           "Requested %u vectors, however only %u allocated, lower performance\n",
                           max_intr, ret);

        return ret;
}

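/* Request one MSI-X interrupt per virtqueue and bind each vq to its
 * own vector; used when enough vectors were allocated
 * (MSIX_VECTOR_PER_VQ_AND_CONFIG).
 */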
static int ifcvf_request_per_vq_irq(struct ifcvf_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int i, vector, ret, irq;

        vf->vqs_reused_irq = -EINVAL;
        for (i = 0; i < vf->nr_vring; i++) {
                snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d", pci_name(pdev), i);
                vector = i;
                irq = pci_irq_vector(pdev, vector);
                ret = devm_request_irq(&pdev->dev, irq,
                                       ifcvf_vq_intr_handler, 0,
                                       vf->vring[i].msix_name,
                                       &vf->vring[i]);
                if (ret) {
                        IFCVF_ERR(pdev, "Failed to request irq for vq %d\n", i);
                        goto err;
                }

                vf->vring[i].irq = irq;
                ret = ifcvf_set_vq_vector(vf, i, vector);
                if (ret == VIRTIO_MSI_NO_VECTOR) {
                        IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
                        goto err;
                }
        }

        return 0;
err:
        ifcvf_free_irq(adapter);

        return -EFAULT;
}

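/* Request a single MSI-X interrupt (vector 0) and share it across all
 * virtqueues; used when fewer vectors than vqs + config are available
 * (MSIX_VECTOR_SHARED_VQ_AND_CONFIG).
 */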
static int ifcvf_request_vqs_reused_irq(struct ifcvf_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int i, vector, ret, irq;

        vector = 0;
        snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-vqs-reused-irq", pci_name(pdev));
        irq = pci_irq_vector(pdev, vector);
        ret = devm_request_irq(&pdev->dev, irq,
                               ifcvf_vqs_reused_intr_handler, 0,
                               vf->vring[0].msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request reused irq for the device\n");
                goto err;
        }

        vf->vqs_reused_irq = irq;
        for (i = 0; i < vf->nr_vring; i++) {
                vf->vring[i].irq = -EINVAL;
                ret = ifcvf_set_vq_vector(vf, i, vector);
                if (ret == VIRTIO_MSI_NO_VECTOR) {
                        IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
                        goto err;
                }
        }

        return 0;
err:
        ifcvf_free_irq(adapter);

        return -EFAULT;
}

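/* Request a single MSI-X interrupt (vector 0) and share it between the
 * config interrupt and all virtqueues; used when only one vector could
 * be allocated (MSIX_VECTOR_DEV_SHARED).
 */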
static int ifcvf_request_dev_irq(struct ifcvf_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int i, vector, ret, irq;

        vector = 0;
        snprintf(vf->vring[0].msix_name, 256, "ifcvf[%s]-dev-irq", pci_name(pdev));
        irq = pci_irq_vector(pdev, vector);
        ret = devm_request_irq(&pdev->dev, irq,
                               ifcvf_dev_intr_handler, 0,
                               vf->vring[0].msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request irq for the device\n");
                goto err;
        }

        vf->vqs_reused_irq = irq;
        for (i = 0; i < vf->nr_vring; i++) {
                vf->vring[i].irq = -EINVAL;
                ret = ifcvf_set_vq_vector(vf, i, vector);
                if (ret == VIRTIO_MSI_NO_VECTOR) {
                        IFCVF_ERR(pdev, "No msix vector for vq %u\n", i);
                        goto err;
                }
        }

        vf->config_irq = irq;
        ret = ifcvf_set_config_vector(vf, vector);
        if (ret == VIRTIO_MSI_NO_VECTOR) {
                IFCVF_ERR(pdev, "No msix vector for device config\n");
                goto err;
        }

        return 0;
err:
        ifcvf_free_irq(adapter);

        return -EFAULT;
}

static int ifcvf_request_vq_irq(struct ifcvf_adapter *adapter)
{
        struct ifcvf_hw *vf = &adapter->vf;
        int ret;

        if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
                ret = ifcvf_request_per_vq_irq(adapter);
        else
                ret = ifcvf_request_vqs_reused_irq(adapter);

        return ret;
}

static int ifcvf_request_config_irq(struct ifcvf_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int config_vector, ret;

        if (vf->msix_vector_status == MSIX_VECTOR_PER_VQ_AND_CONFIG)
                config_vector = vf->nr_vring;
        else if (vf->msix_vector_status == MSIX_VECTOR_SHARED_VQ_AND_CONFIG)
                /* vector 0 for vqs and 1 for config interrupt */
                config_vector = 1;
        else if (vf->msix_vector_status == MSIX_VECTOR_DEV_SHARED)
                /* re-use the vqs vector */
                return 0;
        else
                return -EINVAL;

        snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config",
                 pci_name(pdev));
        vf->config_irq = pci_irq_vector(pdev, config_vector);
        ret = devm_request_irq(&pdev->dev, vf->config_irq,
                               ifcvf_config_changed, 0,
                               vf->config_msix_name, vf);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request config irq\n");
                goto err;
        }

        ret = ifcvf_set_config_vector(vf, config_vector);
        if (ret == VIRTIO_MSI_NO_VECTOR) {
                IFCVF_ERR(pdev, "No msix vector for device config\n");
                goto err;
        }

        return 0;
err:
        ifcvf_free_irq(adapter);

        return -EFAULT;
}

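/* Top-level interrupt setup: allocate MSI-X vectors, then fall back
 * from per-vq vectors, to one vector shared by all vqs plus a config
 * vector, to a single vector shared by the whole device, depending on
 * how many vectors were actually allocated.
 */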
static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
        struct ifcvf_hw *vf = &adapter->vf;
        int nvectors, ret, max_intr;

        nvectors = ifcvf_alloc_vectors(adapter);
        if (nvectors <= 0)
                return -EFAULT;

        vf->msix_vector_status = MSIX_VECTOR_PER_VQ_AND_CONFIG;
        max_intr = vf->nr_vring + 1;
        if (nvectors < max_intr)
                vf->msix_vector_status = MSIX_VECTOR_SHARED_VQ_AND_CONFIG;

        if (nvectors == 1) {
                vf->msix_vector_status = MSIX_VECTOR_DEV_SHARED;
                ret = ifcvf_request_dev_irq(adapter);

                return ret;
        }

        ret = ifcvf_request_vq_irq(adapter);
        if (ret)
                return ret;

        ret = ifcvf_request_config_irq(adapter);

        if (ret)
                return ret;

        return 0;
}

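/* Start the hardware datapath; on failure, latch VIRTIO_CONFIG_S_FAILED
 * into the device status so the driver side can observe the error.
 */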
static int ifcvf_start_datapath(void *private)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
        u8 status;
        int ret;

        ret = ifcvf_start_hw(vf);
        if (ret < 0) {
                status = ifcvf_get_status(vf);
                status |= VIRTIO_CONFIG_S_FAILED;
                ifcvf_set_status(vf, status);
        }

        return ret;
}

static int ifcvf_stop_datapath(void *private)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
        int i;

        for (i = 0; i < vf->nr_vring; i++)
                vf->vring[i].cb.callback = NULL;

        ifcvf_stop_hw(vf);

        return 0;
}

static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
        int i;

        for (i = 0; i < vf->nr_vring; i++) {
                vf->vring[i].last_avail_idx = 0;
                vf->vring[i].desc = 0;
                vf->vring[i].avail = 0;
                vf->vring[i].used = 0;
                vf->vring[i].ready = 0;
                vf->vring[i].cb.callback = NULL;
                vf->vring[i].cb.private = NULL;
        }

        ifcvf_reset(vf);
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
        return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

        return &adapter->vf;
}

static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        struct pci_dev *pdev = adapter->pdev;
        u32 type = vf->dev_type;
        u64 features;

        if (type == VIRTIO_ID_NET || type == VIRTIO_ID_BLOCK)
                features = ifcvf_get_features(vf);
        else {
                features = 0;
                IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
        }

        return features;
}

static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        int ret;

        ret = ifcvf_verify_min_features(vf, features);
        if (ret)
                return ret;

        vf->req_features = features;

        return 0;
}

static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->req_features;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_get_status(vf);
}

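/* On the transition to DRIVER_OK, request interrupts and start the
 * datapath; any failure is reported by setting VIRTIO_CONFIG_S_FAILED.
 */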
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
        u8 status_old;
        int ret;

        vf = vdpa_to_vf(vdpa_dev);
        adapter = vdpa_to_adapter(vdpa_dev);
        status_old = ifcvf_get_status(vf);

        if (status_old == status)
                return;

        if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
            !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
                ret = ifcvf_request_irq(adapter);
                if (ret) {
                        status = ifcvf_get_status(vf);
                        status |= VIRTIO_CONFIG_S_FAILED;
                        ifcvf_set_status(vf, status);
                        return;
                }

                if (ifcvf_start_datapath(adapter) < 0)
                        IFCVF_ERR(adapter->pdev,
                                  "Failed to set ifcvf vdpa status %u\n",
                                  status);
        }

        ifcvf_set_status(vf, status);
}

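/* Reset: if the datapath was running, stop it and free the interrupts,
 * then clear all software vring state and reset the hardware.
 */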
static int ifcvf_vdpa_reset(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
        u8 status_old;

        vf = vdpa_to_vf(vdpa_dev);
        adapter = vdpa_to_adapter(vdpa_dev);
        status_old = ifcvf_get_status(vf);

        if (status_old == 0)
                return 0;

        if (status_old & VIRTIO_CONFIG_S_DRIVER_OK) {
                ifcvf_stop_datapath(adapter);
                ifcvf_free_irq(adapter);
        }

        ifcvf_reset_vring(adapter);

        return 0;
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_MAX;
}

static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   struct vdpa_vq_state *state)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        state->split.avail_index = ifcvf_get_vq_state(vf, qid);
        return 0;
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   const struct vdpa_vq_state *state)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_set_vq_state(vf, qid, state->split.avail_index);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
                                 struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
                                    u16 qid, bool ready)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
                                  u32 num)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
                                     u64 desc_area, u64 driver_area,
                                     u64 device_area)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].desc = desc_area;
        vf->vring[qid].avail = driver_area;
        vf->vring[qid].used = device_area;

        return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vp_ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->dev_type;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
        struct pci_dev *pdev = adapter->pdev;

        return pdev->subsystem_vendor;
}

static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_ALIGNMENT;
}

static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->config_size;
}

static u32 ifcvf_vdpa_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
        return 0;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset,
                                  void *buf, unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_read_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset, const void *buf,
                                  unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_write_dev_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
                                     struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->config_cb.callback = cb->callback;
        vf->config_cb.private = cb->private;
}

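/* Expose the per-vq irq only when each vq owns a dedicated vector;
 * when a vector is shared, return -EINVAL so callers cannot bypass
 * the shared handler.
 */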
static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
                                 u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        if (vf->vqs_reused_irq < 0)
                return vf->vring[qid].irq;
        else
                return -EINVAL;
}

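/* Report the notify (doorbell) region of a vq so it can be mapped
 * directly by the consumer; fall back to PAGE_SIZE when the device
 * does not advertise a notify offset multiplier.
 */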
static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
                                                               u16 idx)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        struct vdpa_notification_area area;

        area.addr = vf->vring[idx].notify_pa;
        if (!vf->notify_off_multiplier)
                area.size = PAGE_SIZE;
        else
                area.size = vf->notify_off_multiplier;

        return area;
}

/*
 * IFCVF currently doesn't have an on-chip IOMMU, so the
 * set_map()/dma_map()/dma_unmap() ops are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
        .get_device_features = ifcvf_vdpa_get_device_features,
        .set_driver_features = ifcvf_vdpa_set_driver_features,
        .get_driver_features = ifcvf_vdpa_get_driver_features,
        .get_status     = ifcvf_vdpa_get_status,
        .set_status     = ifcvf_vdpa_set_status,
        .reset          = ifcvf_vdpa_reset,
        .get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
        .get_vq_state   = ifcvf_vdpa_get_vq_state,
        .set_vq_state   = ifcvf_vdpa_set_vq_state,
        .set_vq_cb      = ifcvf_vdpa_set_vq_cb,
        .set_vq_ready   = ifcvf_vdpa_set_vq_ready,
        .get_vq_ready   = ifcvf_vdpa_get_vq_ready,
        .set_vq_num     = ifcvf_vdpa_set_vq_num,
        .set_vq_address = ifcvf_vdpa_set_vq_address,
        .get_vq_irq     = ifcvf_vdpa_get_vq_irq,
        .kick_vq        = ifcvf_vdpa_kick_vq,
        .get_generation = ifcvf_vdpa_get_generation,
        .get_device_id  = ifcvf_vdpa_get_device_id,
        .get_vendor_id  = ifcvf_vdpa_get_vendor_id,
        .get_vq_align   = ifcvf_vdpa_get_vq_align,
        .get_vq_group   = ifcvf_vdpa_get_vq_group,
        .get_config_size        = ifcvf_vdpa_get_config_size,
        .get_config     = ifcvf_vdpa_get_config,
        .set_config     = ifcvf_vdpa_set_config,
        .set_config_cb  = ifcvf_vdpa_set_config_cb,
        .get_vq_notification = ifcvf_get_vq_notification,
};

static struct virtio_device_id id_table_net[] = {
        {VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID},
        {0},
};

static struct virtio_device_id id_table_blk[] = {
        {VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID},
        {0},
};

static u32 get_dev_type(struct pci_dev *pdev)
{
        u32 dev_type;

        /* This driver drives both modern virtio devices and transitional
         * devices in modern mode.
         * vDPA requires the feature bit VIRTIO_F_ACCESS_PLATFORM, so
         * legacy devices and transitional devices in legacy mode will
         * not work for vDPA; this driver does not drive devices with a
         * legacy interface.
         */

        if (pdev->device < 0x1040)
                dev_type = pdev->subsystem_device;
        else
                dev_type = pdev->device - 0x1040;

        return dev_type;
}

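/* Management-device dev_add callback: allocate and register one vDPA
 * device on top of the VF. Only a single vDPA device per management
 * device is supported, so a second add request is rejected.
 */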
static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
                              const struct vdpa_dev_set_config *config)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
        struct ifcvf_adapter *adapter;
        struct pci_dev *pdev;
        struct ifcvf_hw *vf;
        struct device *dev;
        int ret, i;

        ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
        if (ifcvf_mgmt_dev->adapter)
                return -EOPNOTSUPP;

        pdev = ifcvf_mgmt_dev->pdev;
        dev = &pdev->dev;
        adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
                                    dev, &ifc_vdpa_ops, 1, 1, name, false);
        if (IS_ERR(adapter)) {
                IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
                return PTR_ERR(adapter);
        }

        ifcvf_mgmt_dev->adapter = adapter;

        vf = &adapter->vf;
        vf->dev_type = get_dev_type(pdev);
        vf->base = pcim_iomap_table(pdev);

        adapter->pdev = pdev;
        adapter->vdpa.dma_dev = &pdev->dev;

        ret = ifcvf_init_hw(vf, pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
                goto err;
        }

        for (i = 0; i < vf->nr_vring; i++)
                vf->vring[i].irq = -EINVAL;

        vf->hw_features = ifcvf_get_hw_features(vf);
        vf->config_size = ifcvf_get_config_size(vf);

        adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
        ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to register to vDPA bus");
                goto err;
        }

        return 0;

err:
        put_device(&adapter->vdpa.dev);
        return ret;
}

static void ifcvf_vdpa_dev_del(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

        ifcvf_mgmt_dev = container_of(mdev, struct ifcvf_vdpa_mgmt_dev, mdev);
        _vdpa_unregister_device(dev);
        ifcvf_mgmt_dev->adapter = NULL;
}

static const struct vdpa_mgmtdev_ops ifcvf_vdpa_mgmt_dev_ops = {
        .dev_add = ifcvf_vdpa_dev_add,
        .dev_del = ifcvf_vdpa_dev_del
};

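/* PCI probe: map the BARs, set up DMA, and register a vDPA management
 * device; the actual vDPA device is created later through the dev_add
 * callback above.
 */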
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
        struct device *dev = &pdev->dev;
        u32 dev_type;
        int ret;

        ifcvf_mgmt_dev = kzalloc(sizeof(struct ifcvf_vdpa_mgmt_dev), GFP_KERNEL);
        if (!ifcvf_mgmt_dev) {
                IFCVF_ERR(pdev, "Failed to alloc memory for the vDPA management device\n");
                return -ENOMEM;
        }

        dev_type = get_dev_type(pdev);
        switch (dev_type) {
        case VIRTIO_ID_NET:
                ifcvf_mgmt_dev->mdev.id_table = id_table_net;
                break;
        case VIRTIO_ID_BLOCK:
                ifcvf_mgmt_dev->mdev.id_table = id_table_blk;
                break;
        default:
                IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", dev_type);
                ret = -EOPNOTSUPP;
                goto err;
        }

        ifcvf_mgmt_dev->mdev.ops = &ifcvf_vdpa_mgmt_dev_ops;
        ifcvf_mgmt_dev->mdev.device = dev;
        ifcvf_mgmt_dev->pdev = pdev;

        ret = pcim_enable_device(pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to enable device\n");
                goto err;
        }

        ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
                                 IFCVF_DRIVER_NAME);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request MMIO region\n");
                goto err;
        }

        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret) {
                IFCVF_ERR(pdev, "No usable DMA configuration\n");
                goto err;
        }

        ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
        if (ret) {
                IFCVF_ERR(pdev,
                          "Failed to add devres for freeing irq vectors\n");
                goto err;
        }

        pci_set_master(pdev);

        ret = vdpa_mgmtdev_register(&ifcvf_mgmt_dev->mdev);
        if (ret) {
                IFCVF_ERR(pdev,
                          "Failed to initialize the management interfaces\n");
                goto err;
        }

        pci_set_drvdata(pdev, ifcvf_mgmt_dev);

        return 0;

err:
        kfree(ifcvf_mgmt_dev);
        return ret;
}

static void ifcvf_remove(struct pci_dev *pdev)
{
        struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;

        ifcvf_mgmt_dev = pci_get_drvdata(pdev);
        vdpa_mgmtdev_unregister(&ifcvf_mgmt_dev->mdev);
        kfree(ifcvf_mgmt_dev);
}

static struct pci_device_id ifcvf_pci_ids[] = {
        /* N3000 network device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         N3000_DEVICE_ID,
                         PCI_VENDOR_ID_INTEL,
                         N3000_SUBSYS_DEVICE_ID) },
        /* C5000X-PL network device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         VIRTIO_TRANS_ID_NET,
                         PCI_VENDOR_ID_INTEL,
                         VIRTIO_ID_NET) },
        /* C5000X-PL block device */
        { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
                         VIRTIO_TRANS_ID_BLOCK,
                         PCI_VENDOR_ID_INTEL,
                         VIRTIO_ID_BLOCK) },

        { 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
        .name     = IFCVF_DRIVER_NAME,
        .id_table = ifcvf_pci_ids,
        .probe    = ifcvf_probe,
        .remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");