atlantic: Fix driver resume flow.
drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
// SPDX-License-Identifier: GPL-2.0-only
/* Atlantic Network Driver
 *
 * Copyright (C) 2014-2019 aQuantia Corporation
 * Copyright (C) 2019-2020 Marvell International Ltd.
 */

/* File aq_pci_func.c: Definition of PCI functions. */

#include <linux/interrupt.h>
#include <linux/module.h>

#include "aq_main.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl2/hw_atl2.h"
#include "aq_filters.h"
#include "aq_drvinfo.h"
#include "aq_macsec.h"

static const struct pci_device_id aq_pci_tbl[] = {
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },

        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },

        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },

        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113DEV), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113CS), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC114CS), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC113C), },
        { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC115C), },

        {}
};

static const struct aq_board_revision_s hw_atl_boards[] = {
        { AQ_DEVICE_ID_0001,    AQ_HWREV_1,     &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
        { AQ_DEVICE_ID_D100,    AQ_HWREV_1,     &hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
        { AQ_DEVICE_ID_D107,    AQ_HWREV_1,     &hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
        { AQ_DEVICE_ID_D108,    AQ_HWREV_1,     &hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
        { AQ_DEVICE_ID_D109,    AQ_HWREV_1,     &hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },

        { AQ_DEVICE_ID_0001,    AQ_HWREV_2,     &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
        { AQ_DEVICE_ID_D100,    AQ_HWREV_2,     &hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
        { AQ_DEVICE_ID_D107,    AQ_HWREV_2,     &hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
        { AQ_DEVICE_ID_D108,    AQ_HWREV_2,     &hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
        { AQ_DEVICE_ID_D109,    AQ_HWREV_2,     &hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },

        { AQ_DEVICE_ID_AQC100,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100, },
        { AQ_DEVICE_ID_AQC107,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
        { AQ_DEVICE_ID_AQC108,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
        { AQ_DEVICE_ID_AQC109,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
        { AQ_DEVICE_ID_AQC111,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
        { AQ_DEVICE_ID_AQC112,  AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },

        { AQ_DEVICE_ID_AQC100S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
        { AQ_DEVICE_ID_AQC107S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
        { AQ_DEVICE_ID_AQC108S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
        { AQ_DEVICE_ID_AQC109S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
        { AQ_DEVICE_ID_AQC111S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
        { AQ_DEVICE_ID_AQC112S, AQ_HWREV_ANY,   &hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },

        { AQ_DEVICE_ID_AQC113DEV,       AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
        { AQ_DEVICE_ID_AQC113,          AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
        { AQ_DEVICE_ID_AQC113CS,        AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
        { AQ_DEVICE_ID_AQC114CS,        AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
        { AQ_DEVICE_ID_AQC113C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
        { AQ_DEVICE_ID_AQC115C,         AQ_HWREV_ANY,   &hw_atl2_ops, &hw_atl2_caps_aqc113, },
};

MODULE_DEVICE_TABLE(pci, aq_pci_tbl);

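/* Look up the hardware ops and capabilities matching the PCI device ID
 * and, where the board table cares about it, the silicon revision.
 */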
static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
                                     const struct aq_hw_ops **ops,
                                     const struct aq_hw_caps_s **caps)
{
        int i;

        if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
                if (hw_atl_boards[i].devid == pdev->device &&
                    (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
                     hw_atl_boards[i].revision == pdev->revision)) {
                        *ops = hw_atl_boards[i].ops;
                        *caps = hw_atl_boards[i].caps;
                        break;
                }
        }

        if (i == ARRAY_SIZE(hw_atl_boards))
                return -EINVAL;

        return 0;
}

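/* Set up DMA masks (64-bit with a 32-bit fallback), claim the MMIO
 * regions and enable bus mastering.
 */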
static int aq_pci_func_init(struct pci_dev *pdev)
{
        int err;

        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        if (!err)
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        if (err) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (!err)
                        err = pci_set_consistent_dma_mask(pdev,
                                                          DMA_BIT_MASK(32));
        }
        if (err != 0) {
                err = -ENOSR;
                goto err_exit;
        }

        err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
        if (err < 0)
                goto err_exit;

        pci_set_master(pdev);

        return 0;

err_exit:
        return err;
}

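/* Request interrupt vector @i: the caller's handler is used in MSI/MSI-X
 * mode, while legacy mode installs the shared legacy ISR. An affinity
 * hint is applied for MSI-X vectors when a mask is provided.
 */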
int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
                          char *name, irq_handler_t irq_handler,
                          void *irq_arg, cpumask_t *affinity_mask)
{
        struct pci_dev *pdev = self->pdev;
        int err;

        if (pdev->msix_enabled || pdev->msi_enabled)
                err = request_irq(pci_irq_vector(pdev, i), irq_handler, 0,
                                  name, irq_arg);
        else
                err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
                                  IRQF_SHARED, name, irq_arg);

        if (err >= 0) {
                self->msix_entry_mask |= (1 << i);

                if (pdev->msix_enabled && affinity_mask)
                        irq_set_affinity_hint(pci_irq_vector(pdev, i),
                                              affinity_mask);
        }

        return err;
}

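/* Free every IRQ previously requested via aq_pci_func_alloc_irq() and
 * clear the corresponding bits in the bookkeeping mask.
 */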
void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
        struct pci_dev *pdev = self->pdev;
        unsigned int i;
        void *irq_data;

        for (i = 32U; i--;) {
                if (!((1U << i) & self->msix_entry_mask))
                        continue;
                if (self->aq_nic_cfg.link_irq_vec &&
                    i == self->aq_nic_cfg.link_irq_vec)
                        irq_data = self;
                else if (i < AQ_CFG_VECS_MAX)
                        irq_data = self->aq_vec[i];
                else
                        continue;

                if (pdev->msix_enabled)
                        irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
                free_irq(pci_irq_vector(pdev, i), irq_data);
                self->msix_entry_mask &= ~(1U << i);
        }
}

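/* Report which interrupt mode (MSI-X, MSI or legacy INTx) is in use. */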
unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
{
        if (self->pdev->msix_enabled)
                return AQ_HW_IRQ_MSIX;
        if (self->pdev->msi_enabled)
                return AQ_HW_IRQ_MSI;

        return AQ_HW_IRQ_LEGACY;
}

static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
{
        pci_free_irq_vectors(self->pdev);
}

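/* Probe: enable the device, map its registers, allocate the netdev and
 * interrupt vectors, then register the network interface.
 */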
static int aq_pci_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pci_id)
{
        struct net_device *ndev;
        resource_size_t mmio_pa;
        struct aq_nic_s *self;
        u32 numvecs;
        u32 bar;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                return err;

        err = aq_pci_func_init(pdev);
        if (err)
                goto err_pci_func;

        ndev = aq_ndev_alloc();
        if (!ndev) {
                err = -ENOMEM;
                goto err_ndev;
        }

        self = netdev_priv(ndev);
        self->pdev = pdev;
        SET_NETDEV_DEV(ndev, &pdev->dev);
        pci_set_drvdata(pdev, self);

        mutex_init(&self->fwreq_mutex);

        err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
                                        &aq_nic_get_cfg(self)->aq_hw_caps);
        if (err)
                goto err_ioremap;

        self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
        if (!self->aq_hw) {
                err = -ENOMEM;
                goto err_ioremap;
        }
        self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
        if (self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len) {
                int len = self->aq_hw->aq_nic_cfg->aq_hw_caps->priv_data_len;

                self->aq_hw->priv = kzalloc(len, GFP_KERNEL);
                if (!self->aq_hw->priv) {
                        err = -ENOMEM;
                        goto err_free_aq_hw;
                }
        }

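        /* Map the first memory BAR; bail out if it is absent or smaller
         * than the register space.
         */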
        for (bar = 0; bar < 4; ++bar) {
                if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
                        resource_size_t reg_sz;

                        mmio_pa = pci_resource_start(pdev, bar);
                        if (mmio_pa == 0U) {
                                err = -EIO;
                                goto err_free_aq_hw_priv;
                        }

                        reg_sz = pci_resource_len(pdev, bar);
                        if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
                                err = -EIO;
                                goto err_free_aq_hw_priv;
                        }

                        self->aq_hw->mmio = ioremap(mmio_pa, reg_sz);
                        if (!self->aq_hw->mmio) {
                                err = -EIO;
                                goto err_free_aq_hw_priv;
                        }
                        break;
                }
        }

        if (bar == 4) {
                err = -EIO;
                goto err_free_aq_hw_priv;
        }

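        /* Size the vector request: data-path vectors capped by hardware
         * and online CPUs, plus one vector for PTP and the service IRQs.
         */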
        numvecs = min((u8)AQ_CFG_VECS_DEF,
                      aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
        numvecs = min(numvecs, num_online_cpus());
        /* Request IRQ vector for PTP */
        numvecs += 1;

        numvecs += AQ_HW_SERVICE_IRQS;
        /* Enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
        err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
                                    PCI_IRQ_MSIX | PCI_IRQ_MSI |
                                    PCI_IRQ_LEGACY);

        if (err < 0)
                goto err_hwinit;
        numvecs = err;
#endif
        self->irqvecs = numvecs;

        /* net device init */
        aq_nic_cfg_start(self);

        aq_nic_ndev_init(self);

        err = aq_nic_ndev_register(self);
        if (err < 0)
                goto err_register;

        aq_drvinfo_init(ndev);

        return 0;

err_register:
        aq_nic_free_vectors(self);
        aq_pci_free_irq_vectors(self);
err_hwinit:
        iounmap(self->aq_hw->mmio);
err_free_aq_hw_priv:
        kfree(self->aq_hw->priv);
err_free_aq_hw:
        kfree(self->aq_hw);
err_ioremap:
        free_netdev(ndev);
err_ndev:
        pci_release_regions(pdev);
err_pci_func:
        pci_disable_device(pdev);

        return err;
}

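/* Undo aq_pci_probe(): drop rxnfc rules, unregister the netdev, free
 * vectors and IRQs, unmap MMIO and release the PCI resources.
 */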
static void aq_pci_remove(struct pci_dev *pdev)
{
        struct aq_nic_s *self = pci_get_drvdata(pdev);

        if (self->ndev) {
                aq_clear_rxnfc_all_rules(self);
                if (self->ndev->reg_state == NETREG_REGISTERED)
                        unregister_netdev(self->ndev);

#if IS_ENABLED(CONFIG_MACSEC)
                aq_macsec_free(self);
#endif
                aq_nic_free_vectors(self);
                aq_pci_free_irq_vectors(self);
                iounmap(self->aq_hw->mmio);
                kfree(self->aq_hw->priv);
                kfree(self->aq_hw);
                pci_release_regions(pdev);
                free_netdev(self->ndev);
        }

        pci_disable_device(pdev);
}

static void aq_pci_shutdown(struct pci_dev *pdev)
{
        struct aq_nic_s *self = pci_get_drvdata(pdev);

        aq_nic_shutdown(self);

        pci_disable_device(pdev);

        if (system_state == SYSTEM_POWER_OFF) {
                pci_wake_from_d3(pdev, false);
                pci_set_power_state(pdev, PCI_D3hot);
        }
}

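/* Common suspend handler. Detaches and stops the interface; when @deep
 * is set the NIC is also deinitialized and moved to its low-power /
 * wake-on-LAN state.
 */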
static int aq_suspend_common(struct device *dev, bool deep)
{
        struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));

        rtnl_lock();

        nic->power_state = AQ_HW_POWER_STATE_D3;
        netif_device_detach(nic->ndev);
        netif_tx_stop_all_queues(nic->ndev);

        if (netif_running(nic->ndev))
                aq_nic_stop(nic);

        if (deep) {
                aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
                aq_nic_set_power(nic);
        }

        rtnl_unlock();

        return 0;
}

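/* Common resume handler, the counterpart of aq_suspend_common(). For a
 * deep resume the NIC/vector objects are torn down and reinitialized
 * before the interface is restarted.
 */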
static int atl_resume_common(struct device *dev, bool deep)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct aq_nic_s *nic;
        int ret = 0;

        nic = pci_get_drvdata(pdev);

        rtnl_lock();

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        if (deep) {
                /* Reinitialize Nic/Vecs objects */
                aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);

                ret = aq_nic_init(nic);
                if (ret)
                        goto err_exit;
        }

        if (netif_running(nic->ndev)) {
                ret = aq_nic_start(nic);
                if (ret)
                        goto err_exit;
        }

        netif_device_attach(nic->ndev);
        netif_tx_start_all_queues(nic->ndev);

err_exit:
        if (ret < 0)
                aq_nic_deinit(nic, true);

        rtnl_unlock();

        return ret;
}

static int aq_pm_freeze(struct device *dev)
{
        return aq_suspend_common(dev, false);
}

static int aq_pm_suspend_poweroff(struct device *dev)
{
        return aq_suspend_common(dev, true);
}

static int aq_pm_thaw(struct device *dev)
{
        return atl_resume_common(dev, false);
}

static int aq_pm_resume_restore(struct device *dev)
{
        return atl_resume_common(dev, true);
}

static const struct dev_pm_ops aq_pm_ops = {
        .suspend = aq_pm_suspend_poweroff,
        .poweroff = aq_pm_suspend_poweroff,
        .freeze = aq_pm_freeze,
        .resume = aq_pm_resume_restore,
        .restore = aq_pm_resume_restore,
        .thaw = aq_pm_thaw,
};

static struct pci_driver aq_pci_ops = {
        .name = AQ_CFG_DRV_NAME,
        .id_table = aq_pci_tbl,
        .probe = aq_pci_probe,
        .remove = aq_pci_remove,
        .shutdown = aq_pci_shutdown,
#ifdef CONFIG_PM
        .driver.pm = &aq_pm_ops,
#endif
};

int aq_pci_func_register_driver(void)
{
        return pci_register_driver(&aq_pci_ops);
}

void aq_pci_func_unregister_driver(void)
{
        pci_unregister_driver(&aq_pci_ops);
}