Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
[linux-2.6-microblaze.git] / drivers / vdpa / ifcvf / ifcvf_main.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Intel IFC VF NIC driver for virtio dataplane offloading
4  *
5  * Copyright (C) 2020 Intel Corporation.
6  *
7  * Author: Zhu Lingshan <lingshan.zhu@intel.com>
8  *
9  */
10
11 #include <linux/interrupt.h>
12 #include <linux/module.h>
13 #include <linux/pci.h>
14 #include <linux/sysfs.h>
15 #include "ifcvf_base.h"
16
17 #define VERSION_STRING  "0.1"
18 #define DRIVER_AUTHOR   "Intel Corporation"
19 #define IFCVF_DRIVER_NAME       "ifcvf"
20
21 static irqreturn_t ifcvf_config_changed(int irq, void *arg)
22 {
23         struct ifcvf_hw *vf = arg;
24
25         if (vf->config_cb.callback)
26                 return vf->config_cb.callback(vf->config_cb.private);
27
28         return IRQ_HANDLED;
29 }
30
31 static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
32 {
33         struct vring_info *vring = arg;
34
35         if (vring->cb.callback)
36                 return vring->cb.callback(vring->cb.private);
37
38         return IRQ_HANDLED;
39 }
40
/*
 * Release the MSI-X vectors of @data (a struct pci_dev *).  Also used as
 * a devres action registered in ifcvf_probe(), hence the void * signature.
 */
static void ifcvf_free_irq_vectors(void *data)
{
	pci_free_irq_vectors(data);
}
45
46 static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues)
47 {
48         struct pci_dev *pdev = adapter->pdev;
49         struct ifcvf_hw *vf = &adapter->vf;
50         int i;
51
52
53         for (i = 0; i < queues; i++) {
54                 devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]);
55                 vf->vring[i].irq = -EINVAL;
56         }
57
58         devm_free_irq(&pdev->dev, vf->config_irq, vf);
59         ifcvf_free_irq_vectors(pdev);
60 }
61
62 static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
63 {
64         struct pci_dev *pdev = adapter->pdev;
65         struct ifcvf_hw *vf = &adapter->vf;
66         int vector, i, ret, irq;
67
68         ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
69                                     IFCVF_MAX_INTR, PCI_IRQ_MSIX);
70         if (ret < 0) {
71                 IFCVF_ERR(pdev, "Failed to alloc IRQ vectors\n");
72                 return ret;
73         }
74
75         snprintf(vf->config_msix_name, 256, "ifcvf[%s]-config\n",
76                  pci_name(pdev));
77         vector = 0;
78         vf->config_irq = pci_irq_vector(pdev, vector);
79         ret = devm_request_irq(&pdev->dev, vf->config_irq,
80                                ifcvf_config_changed, 0,
81                                vf->config_msix_name, vf);
82         if (ret) {
83                 IFCVF_ERR(pdev, "Failed to request config irq\n");
84                 return ret;
85         }
86
87         for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
88                 snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n",
89                          pci_name(pdev), i);
90                 vector = i + IFCVF_MSI_QUEUE_OFF;
91                 irq = pci_irq_vector(pdev, vector);
92                 ret = devm_request_irq(&pdev->dev, irq,
93                                        ifcvf_intr_handler, 0,
94                                        vf->vring[i].msix_name,
95                                        &vf->vring[i]);
96                 if (ret) {
97                         IFCVF_ERR(pdev,
98                                   "Failed to request irq for vq %d\n", i);
99                         ifcvf_free_irq(adapter, i);
100
101                         return ret;
102                 }
103
104                 vf->vring[i].irq = irq;
105         }
106
107         return 0;
108 }
109
110 static int ifcvf_start_datapath(void *private)
111 {
112         struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
113         u8 status;
114         int ret;
115
116         vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
117         ret = ifcvf_start_hw(vf);
118         if (ret < 0) {
119                 status = ifcvf_get_status(vf);
120                 status |= VIRTIO_CONFIG_S_FAILED;
121                 ifcvf_set_status(vf, status);
122         }
123
124         return ret;
125 }
126
127 static int ifcvf_stop_datapath(void *private)
128 {
129         struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
130         int i;
131
132         for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
133                 vf->vring[i].cb.callback = NULL;
134
135         ifcvf_stop_hw(vf);
136
137         return 0;
138 }
139
140 static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
141 {
142         struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
143         int i;
144
145         for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
146                 vf->vring[i].last_avail_idx = 0;
147                 vf->vring[i].desc = 0;
148                 vf->vring[i].avail = 0;
149                 vf->vring[i].used = 0;
150                 vf->vring[i].ready = 0;
151                 vf->vring[i].cb.callback = NULL;
152                 vf->vring[i].cb.private = NULL;
153         }
154
155         ifcvf_reset(vf);
156 }
157
/* Map a vdpa_device back to its containing ifcvf_adapter. */
static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
	return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}
162
/* Map a vdpa_device to the ifcvf_hw embedded in its adapter. */
static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

	return &adapter->vf;
}
169
170 static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
171 {
172         struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
173         u64 features;
174
175         features = ifcvf_get_features(vf) & IFCVF_SUPPORTED_FEATURES;
176
177         return features;
178 }
179
/*
 * vdpa_config_ops.set_features: cache the driver-negotiated features;
 * they are applied to hardware later when the datapath is started.
 */
static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->req_features = features;

	return 0;
}
188
/* vdpa_config_ops.get_status: read the virtio device status byte. */
static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_get_status(vf);
}
195
/*
 * vdpa_config_ops.set_status: apply a new virtio device status.
 *
 * Transitions handled before the status register is finally written:
 *  - DRIVER_OK cleared: stop the datapath and free all irqs/vectors.
 *  - status == 0 (device reset): clear cached vring state and reset the
 *    hardware; returns early without writing the status register
 *    (presumably ifcvf_reset() takes care of it — see ifcvf_base).
 *  - DRIVER_OK newly set: request irqs and start the datapath; if irq
 *    setup fails, latch VIRTIO_CONFIG_S_FAILED instead of the requested
 *    status and bail out.
 */
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	u8 status_old;
	int ret;

	vf  = vdpa_to_vf(vdpa_dev);
	/* the adapter was stored as PCI drvdata in ifcvf_probe() */
	adapter = dev_get_drvdata(vdpa_dev->dev.parent);
	status_old = ifcvf_get_status(vf);

	if (status_old == status)
		return;

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
		ifcvf_stop_datapath(adapter);
		ifcvf_free_irq(adapter, IFCVF_MAX_QUEUE_PAIRS * 2);
	}

	if (status == 0) {
		ifcvf_reset_vring(adapter);
		return;
	}

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) &&
	    !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) {
		ret = ifcvf_request_irq(adapter);
		if (ret) {
			status = ifcvf_get_status(vf);
			status |= VIRTIO_CONFIG_S_FAILED;
			ifcvf_set_status(vf, status);
			return;
		}

		/* datapath failure is logged but the status is still written */
		if (ifcvf_start_datapath(adapter) < 0)
			IFCVF_ERR(adapter->pdev,
				  "Failed to set ifcvf vdpa  status %u\n",
				  status);
	}

	ifcvf_set_status(vf, status);
}
239
/* vdpa_config_ops.get_vq_num_max: maximum queue size supported. */
static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_MAX;
}
244
/*
 * vdpa_config_ops.get_vq_state: report vq @qid's state.  Only the
 * avail index is tracked by this device.  Always returns 0.
 */
static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	state->avail_index = ifcvf_get_vq_state(vf, qid);
	return 0;
}
253
/*
 * vdpa_config_ops.set_vq_state: restore vq @qid's avail index (e.g. on
 * live migration).  Propagates ifcvf_set_vq_state()'s return value.
 */
static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
				   const struct vdpa_vq_state *state)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ifcvf_set_vq_state(vf, qid, state->avail_index);
}
261
/*
 * vdpa_config_ops.set_vq_cb: install the callback invoked from
 * ifcvf_intr_handler() when vq @qid's interrupt fires.
 */
static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
				 struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].cb = *cb;
}
269
/* vdpa_config_ops.set_vq_ready: cache vq @qid's ready flag. */
static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
				    u16 qid, bool ready)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].ready = ready;
}
277
/* vdpa_config_ops.get_vq_ready: return vq @qid's cached ready flag. */
static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->vring[qid].ready;
}
284
/* vdpa_config_ops.set_vq_num: cache vq @qid's size (number of entries). */
static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
				  u32 num)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].size = num;
}
292
/*
 * vdpa_config_ops.set_vq_address: cache vq @qid's ring addresses
 * (descriptor table, avail/driver ring, used/device ring); they are
 * programmed into hardware when the datapath is started.
 * Always returns 0.
 */
static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
				     u64 desc_area, u64 driver_area,
				     u64 device_area)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->vring[qid].desc = desc_area;
	vf->vring[qid].avail = driver_area;
	vf->vring[qid].used = device_area;

	return 0;
}
305
/* vdpa_config_ops.kick_vq: notify the device that vq @qid has new buffers. */
static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	ifcvf_notify_queue(vf, qid);
}
312
/*
 * vdpa_config_ops.get_generation: read the config generation counter
 * from the device's common config MMIO space.
 */
static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return ioread8(&vf->common_cfg->config_generation);
}
319
/* vdpa_config_ops.get_device_id: this VF always presents as virtio-net. */
static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
	return VIRTIO_ID_NET;
}
324
/* vdpa_config_ops.get_vendor_id: report the subsystem vendor ID. */
static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
	return IFCVF_SUBSYS_VENDOR_ID;
}
329
/* vdpa_config_ops.get_vq_align: required vring memory alignment. */
static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
	return IFCVF_QUEUE_ALIGNMENT;
}
334
/*
 * vdpa_config_ops.get_config: copy @len bytes of the virtio-net config
 * space starting at @offset into @buf.  Out-of-bounds requests only
 * trigger a WARN; the read still proceeds, so callers must stay within
 * sizeof(struct virtio_net_config).
 */
static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset,
				  void *buf, unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	WARN_ON(offset + len > sizeof(struct virtio_net_config));
	ifcvf_read_net_config(vf, offset, buf, len);
}
344
/*
 * vdpa_config_ops.set_config: write @len bytes from @buf into the
 * virtio-net config space at @offset.  As with get_config(), a
 * WARN-only bounds check — the write is not suppressed on overflow.
 */
static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
				  unsigned int offset, const void *buf,
				  unsigned int len)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	WARN_ON(offset + len > sizeof(struct virtio_net_config));
	ifcvf_write_net_config(vf, offset, buf, len);
}
354
/*
 * vdpa_config_ops.set_config_cb: install the callback invoked from
 * ifcvf_config_changed() on a device config-change interrupt.
 */
static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
				     struct vdpa_callback *cb)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	vf->config_cb.callback = cb->callback;
	vf->config_cb.private = cb->private;
}
363
/*
 * vdpa_config_ops.get_vq_irq: return vq @qid's irq number, or -EINVAL
 * when no irq has been requested yet (see ifcvf_probe/ifcvf_free_irq).
 */
static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
				 u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

	return vf->vring[qid].irq;
}
371
/*
 * IFCVF currently doesn't have an on-chip IOMMU, so set_map()/dma_map()/
 * dma_unmap() are not implemented here; vdpa.dma_dev is set to the PCI
 * device in ifcvf_probe() instead.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
	.get_features   = ifcvf_vdpa_get_features,
	.set_features   = ifcvf_vdpa_set_features,
	.get_status     = ifcvf_vdpa_get_status,
	.set_status     = ifcvf_vdpa_set_status,
	.get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
	.get_vq_state   = ifcvf_vdpa_get_vq_state,
	.set_vq_state   = ifcvf_vdpa_set_vq_state,
	.set_vq_cb      = ifcvf_vdpa_set_vq_cb,
	.set_vq_ready   = ifcvf_vdpa_set_vq_ready,
	.get_vq_ready   = ifcvf_vdpa_get_vq_ready,
	.set_vq_num     = ifcvf_vdpa_set_vq_num,
	.set_vq_address = ifcvf_vdpa_set_vq_address,
	.get_vq_irq     = ifcvf_vdpa_get_vq_irq,
	.kick_vq        = ifcvf_vdpa_kick_vq,
	.get_generation = ifcvf_vdpa_get_generation,
	.get_device_id  = ifcvf_vdpa_get_device_id,
	.get_vendor_id  = ifcvf_vdpa_get_vendor_id,
	.get_vq_align   = ifcvf_vdpa_get_vq_align,
	.get_config     = ifcvf_vdpa_get_config,
	.set_config     = ifcvf_vdpa_set_config,
	.set_config_cb  = ifcvf_vdpa_set_config_cb,
};
399
/*
 * PCI probe: enable the device, map BARs 0/2/4, set up 64-bit DMA,
 * allocate the vDPA adapter, initialize the IFCVF hardware layer and
 * register the device on the vdpa bus.
 *
 * Managed (pcim_/devm_) resources mean the enable state, MMIO mappings
 * and MSI-X vectors are released automatically on detach or probe
 * failure; only the vdpa device reference needs explicit unwinding.
 */
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct ifcvf_adapter *adapter;
	struct ifcvf_hw *vf;
	int ret, i;

	ret = pcim_enable_device(pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to enable device\n");
		return ret;
	}

	/* map BARs 0, 2 and 4 (presumably the virtio capability regions —
	 * see ifcvf_init_hw() in ifcvf_base)
	 */
	ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
				 IFCVF_DRIVER_NAME);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to request MMIO region\n");
		return ret;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		IFCVF_ERR(pdev, "No usable DMA configuration\n");
		return ret;
	}

	/* make sure MSI-X vectors are freed automatically on detach */
	ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
	if (ret) {
		IFCVF_ERR(pdev,
			  "Failed for adding devres for freeing irq vectors\n");
		return ret;
	}

	adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
				    dev, &ifc_vdpa_ops,
				    IFCVF_MAX_QUEUE_PAIRS * 2);
	if (adapter == NULL) {
		IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
		return -ENOMEM;
	}

	pci_set_master(pdev);
	/* ifcvf_vdpa_set_status() retrieves the adapter via this drvdata */
	pci_set_drvdata(pdev, adapter);

	vf = &adapter->vf;
	vf->base = pcim_iomap_table(pdev);

	adapter->pdev = pdev;
	adapter->vdpa.dma_dev = &pdev->dev;

	ret = ifcvf_init_hw(vf, pdev);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
		goto err;
	}

	/* no irqs requested yet; see ifcvf_request_irq() */
	for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
		vf->vring[i].irq = -EINVAL;

	ret = vdpa_register_device(&adapter->vdpa);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
		goto err;
	}

	return 0;

err:
	/* drop the reference taken by vdpa_alloc_device() */
	put_device(&adapter->vdpa.dev);
	return ret;
}
471
/*
 * PCI remove: unregister from the vdpa bus; all other resources are
 * managed (pcim_/devm_) and released automatically afterwards.
 */
static void ifcvf_remove(struct pci_dev *pdev)
{
	struct ifcvf_adapter *adapter = pci_get_drvdata(pdev);

	vdpa_unregister_device(&adapter->vdpa);
}
478
479 static struct pci_device_id ifcvf_pci_ids[] = {
480         { PCI_DEVICE_SUB(IFCVF_VENDOR_ID,
481                 IFCVF_DEVICE_ID,
482                 IFCVF_SUBSYS_VENDOR_ID,
483                 IFCVF_SUBSYS_DEVICE_ID) },
484         { 0 },
485 };
486 MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);
487
/* PCI driver glue binding ifcvf_probe/ifcvf_remove to matching VFs. */
static struct pci_driver ifcvf_driver = {
	.name     = IFCVF_DRIVER_NAME,
	.id_table = ifcvf_pci_ids,
	.probe    = ifcvf_probe,
	.remove   = ifcvf_remove,
};
494
/* Standard module boilerplate: register/unregister the PCI driver. */
module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");
MODULE_VERSION(VERSION_STRING);