drivers/vdpa/ifcvf/ifcvf_main.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sysfs.h>
#include "ifcvf_base.h"

#define VERSION_STRING  "0.1"
#define DRIVER_AUTHOR   "Intel Corporation"
#define IFCVF_DRIVER_NAME       "ifcvf"

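/*
 * Per-virtqueue MSI-X interrupt handler: dispatch to the vDPA
 * callback registered for this vring, if one has been set.
 */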
static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
{
        struct vring_info *vring = arg;

        if (vring->cb.callback)
                return vring->cb.callback(vring->cb.private);

        return IRQ_HANDLED;
}

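/*
 * Bring up the device datapath; on failure, latch
 * VIRTIO_CONFIG_S_FAILED into the device status so the guest
 * driver can observe the error.
 */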
static int ifcvf_start_datapath(void *private)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
        struct ifcvf_adapter *ifcvf;
        u8 status;
        int ret;

        ifcvf = vf_to_adapter(vf);
        vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
        ret = ifcvf_start_hw(vf);
        if (ret < 0) {
                status = ifcvf_get_status(vf);
                status |= VIRTIO_CONFIG_S_FAILED;
                ifcvf_set_status(vf, status);
        }

        return ret;
}

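/* Drop all vring callbacks, then stop the hardware queues. */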
static int ifcvf_stop_datapath(void *private)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
        int i;

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
                vf->vring[i].cb.callback = NULL;

        ifcvf_stop_hw(vf);

        return 0;
}

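/* Clear all software vring state, then reset the device itself. */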
static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
{
        struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
        int i;

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
                vf->vring[i].last_avail_idx = 0;
                vf->vring[i].desc = 0;
                vf->vring[i].avail = 0;
                vf->vring[i].used = 0;
                vf->vring[i].ready = 0;
                vf->vring[i].cb.callback = NULL;
                vf->vring[i].cb.private = NULL;
        }

        ifcvf_reset(vf);
}

static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
{
        return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
}

static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);

        return &adapter->vf;
}

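/*
 * vdpa_config_ops implementations: these are invoked by the vDPA
 * bus and mostly translate into the ifcvf_base helpers or simple
 * accesses to the per-vring software state.
 */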
static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
        u64 features;

        features = ifcvf_get_features(vf) & IFCVF_SUPPORTED_FEATURES;

        return features;
}

static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->req_features = features;

        return 0;
}

static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_get_status(vf);
}

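/*
 * A zero status means a device reset: tear down the datapath and
 * clear the vring state. Once the guest driver sets DRIVER_OK,
 * start the datapath before writing the new status to the device.
 */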
static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
{
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;

        vf = vdpa_to_vf(vdpa_dev);
        adapter = dev_get_drvdata(vdpa_dev->dev.parent);

        if (status == 0) {
                ifcvf_stop_datapath(adapter);
                ifcvf_reset_vring(adapter);
                return;
        }

        if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
                if (ifcvf_start_datapath(adapter) < 0)
                        IFCVF_ERR(adapter->pdev,
                                  "Failed to set ifcvf vdpa status %u\n",
                                  status);
        }

        ifcvf_set_status(vf, status);
}

static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_MAX;
}

static u64 ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_get_vq_state(vf, qid);
}

static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
                                   u64 num)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ifcvf_set_vq_state(vf, qid, num);
}

static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
                                 struct vdpa_callback *cb)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].cb = *cb;
}

static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
                                    u16 qid, bool ready)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].ready = ready;
}

static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return vf->vring[qid].ready;
}

static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
                                  u32 num)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].size = num;
}

static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
                                     u64 desc_area, u64 driver_area,
                                     u64 device_area)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        vf->vring[qid].desc = desc_area;
        vf->vring[qid].avail = driver_area;
        vf->vring[qid].used = device_area;

        return 0;
}

static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        ifcvf_notify_queue(vf, qid);
}

static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        return ioread8(&vf->common_cfg->config_generation);
}

static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
{
        return VIRTIO_ID_NET;
}

static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
{
        return IFCVF_SUBSYS_VENDOR_ID;
}

static u16 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
{
        return IFCVF_QUEUE_ALIGNMENT;
}

static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset,
                                  void *buf, unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        WARN_ON(offset + len > sizeof(struct virtio_net_config));
        ifcvf_read_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
                                  unsigned int offset, const void *buf,
                                  unsigned int len)
{
        struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);

        WARN_ON(offset + len > sizeof(struct virtio_net_config));
        ifcvf_write_net_config(vf, offset, buf, len);
}

static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
                                     struct vdpa_callback *cb)
{
        /* We don't support config interrupt */
}

/*
 * IFCVF currently doesn't have an on-chip IOMMU, so
 * set_map()/dma_map()/dma_unmap() are not implemented.
 */
static const struct vdpa_config_ops ifc_vdpa_ops = {
        .get_features   = ifcvf_vdpa_get_features,
        .set_features   = ifcvf_vdpa_set_features,
        .get_status     = ifcvf_vdpa_get_status,
        .set_status     = ifcvf_vdpa_set_status,
        .get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
        .get_vq_state   = ifcvf_vdpa_get_vq_state,
        .set_vq_state   = ifcvf_vdpa_set_vq_state,
        .set_vq_cb      = ifcvf_vdpa_set_vq_cb,
        .set_vq_ready   = ifcvf_vdpa_set_vq_ready,
        .get_vq_ready   = ifcvf_vdpa_get_vq_ready,
        .set_vq_num     = ifcvf_vdpa_set_vq_num,
        .set_vq_address = ifcvf_vdpa_set_vq_address,
        .kick_vq        = ifcvf_vdpa_kick_vq,
        .get_generation = ifcvf_vdpa_get_generation,
        .get_device_id  = ifcvf_vdpa_get_device_id,
        .get_vendor_id  = ifcvf_vdpa_get_vendor_id,
        .get_vq_align   = ifcvf_vdpa_get_vq_align,
        .get_config     = ifcvf_vdpa_get_config,
        .set_config     = ifcvf_vdpa_set_config,
        .set_config_cb  = ifcvf_vdpa_set_config_cb,
};

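/*
 * Request one MSI-X interrupt per virtqueue; the vector index is
 * offset by IFCVF_MSI_QUEUE_OFF to skip the non-queue vectors.
 * devm_request_irq() ties each irq to the PCI device lifetime,
 * so the error path needs no explicit free.
 */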
static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        struct ifcvf_hw *vf = &adapter->vf;
        int vector, i, ret, irq;

        for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
                snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d",
                         pci_name(pdev), i);
                vector = i + IFCVF_MSI_QUEUE_OFF;
                irq = pci_irq_vector(pdev, vector);
                ret = devm_request_irq(&pdev->dev, irq,
                                       ifcvf_intr_handler, 0,
                                       vf->vring[i].msix_name,
                                       &vf->vring[i]);
                if (ret) {
                        IFCVF_ERR(pdev,
                                  "Failed to request irq for vq %d\n", i);
                        return ret;
                }
                vf->vring[i].irq = irq;
        }

        return 0;
}

static void ifcvf_free_irq_vectors(void *data)
{
        pci_free_irq_vectors(data);
}

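/*
 * Probe flow: enable the device, map the BARs carrying the virtio
 * capabilities, set the 64-bit DMA masks, allocate the MSI-X
 * vectors, then allocate and register the vDPA device. Managed
 * (pcim_/devm_) helpers handle most of the unwinding on failure.
 */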
static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct device *dev = &pdev->dev;
        struct ifcvf_adapter *adapter;
        struct ifcvf_hw *vf;
        int ret;

        ret = pcim_enable_device(pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to enable device\n");
                return ret;
        }

        ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
                                 IFCVF_DRIVER_NAME);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request MMIO region\n");
                return ret;
        }

        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (ret) {
                IFCVF_ERR(pdev, "No usable DMA configuration\n");
                return ret;
        }

        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (ret) {
                IFCVF_ERR(pdev,
                          "No usable coherent DMA configuration\n");
                return ret;
        }

        ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
                                    IFCVF_MAX_INTR, PCI_IRQ_MSIX);
        if (ret < 0) {
                IFCVF_ERR(pdev, "Failed to alloc irq vectors\n");
                return ret;
        }

        ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
        if (ret) {
                IFCVF_ERR(pdev,
                          "Failed to add devres action for freeing irq vectors\n");
                return ret;
        }

        adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
                                    dev, &ifc_vdpa_ops);
        if (!adapter) {
                IFCVF_ERR(pdev, "Failed to allocate vDPA structure\n");
                return -ENOMEM;
        }

        pci_set_master(pdev);
        pci_set_drvdata(pdev, adapter);

        vf = &adapter->vf;
        vf->base = pcim_iomap_table(pdev);

        adapter->pdev = pdev;
        adapter->vdpa.dma_dev = &pdev->dev;

        ret = ifcvf_request_irq(adapter);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to request MSI-X irq\n");
                goto err;
        }

        ret = ifcvf_init_hw(vf, pdev);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
                goto err;
        }

        ret = vdpa_register_device(&adapter->vdpa);
        if (ret) {
                IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus\n");
                goto err;
        }

        return 0;

err:
        put_device(&adapter->vdpa.dev);
        return ret;
}

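/*
 * Unregistering the vDPA device drops its reference and frees the
 * adapter; the managed (pcim_/devm_) resources are released by
 * the driver core.
 */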
static void ifcvf_remove(struct pci_dev *pdev)
{
        struct ifcvf_adapter *adapter = pci_get_drvdata(pdev);

        vdpa_unregister_device(&adapter->vdpa);
}

static struct pci_device_id ifcvf_pci_ids[] = {
        { PCI_DEVICE_SUB(IFCVF_VENDOR_ID,
                IFCVF_DEVICE_ID,
                IFCVF_SUBSYS_VENDOR_ID,
                IFCVF_SUBSYS_DEVICE_ID) },
        { 0 },
};
MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);

static struct pci_driver ifcvf_driver = {
        .name     = IFCVF_DRIVER_NAME,
        .id_table = ifcvf_pci_ids,
        .probe    = ifcvf_probe,
        .remove   = ifcvf_remove,
};

module_pci_driver(ifcvf_driver);

MODULE_LICENSE("GPL v2");
MODULE_VERSION(VERSION_STRING);