/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <net/vxlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn23xx_vf_device.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
MODULE_LICENSE("GPL");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

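/*
 * Not shown in this excerpt: the probe path typically feeds the "debug"
 * parameter through netif_msg_init(debug, DEFAULT_MSG_ENABLE), so the -1
 * default selects the bit set defined above.
 */
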
struct oct_timestamp_resp {
        u64 rh;
        u64 timestamp;
        u64 status;
};

union tx_info {
        u64 u64;
        struct {
#ifdef __BIG_ENDIAN_BITFIELD
                u16 gso_size;
                u16 gso_segs;
                u32 reserved;
#else
                u32 reserved;
                u16 gso_segs;
                u16 gso_size;
#endif
        } s;
};

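/*
 * Illustrative sketch (hypothetical helper, not used by the driver): the
 * union above lets the transmit path fill gso_size/gso_segs as 16-bit
 * fields and hand the packed result to the command as a single 64-bit
 * word, with field order fixed for endianness at compile time.
 */
static inline u64 __maybe_unused pack_tx_info_example(u16 gso_size,
                                                      u16 gso_segs)
{
        union tx_info txi;

        txi.u64 = 0;
        txi.s.gso_size = gso_size;
        txi.s.gso_segs = gso_segs;
        return txi.u64;	/* the one word written into the command */
}
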
#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE \
                (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

static int
liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void liquidio_vf_remove(struct pci_dev *pdev);
static int octeon_device_init(struct octeon_device *oct);
static int liquidio_stop(struct net_device *netdev);

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
        struct octeon_device_priv *oct_priv =
            (struct octeon_device_priv *)oct->priv;
        int retry = MAX_IO_PENDING_PKT_COUNT;
        int pkt_cnt = 0, pending_pkts;
        int i;

        do {
                pending_pkts = 0;

                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
                }
                if (pkt_cnt > 0) {
                        pending_pkts += pkt_cnt;
                        tasklet_schedule(&oct_priv->droq_tasklet);
                }
                pkt_cnt = 0;
                schedule_timeout_uninterruptible(1);

        } while (retry-- && pending_pkts);

        return pkt_cnt;
}

/**
 * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
 * @oct: Pointer to Octeon device
 */
static void pcierror_quiesce_device(struct octeon_device *oct)
{
        int i;

        /* Disable the input and output queues now. No more packets will
         * arrive from Octeon, but we should wait for all packet processing
         * to finish.
         */

        /* To allow for in-flight requests */
        schedule_timeout_uninterruptible(100);

        if (wait_for_pending_requests(oct))
                dev_err(&oct->pci_dev->dev, "There were pending requests\n");

        /* Force all requests waiting to be fetched by OCTEON to complete. */
        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                struct octeon_instr_queue *iq;

                if (!(oct->io_qmask.iq & BIT_ULL(i)))
                        continue;
                iq = oct->instr_queue[i];

                if (atomic_read(&iq->instr_pending)) {
                        spin_lock_bh(&iq->lock);
                        iq->fill_cnt = 0;
                        iq->octeon_read_index = iq->host_write_index;
                        iq->stats.instr_processed +=
                            atomic_read(&iq->instr_pending);
                        lio_process_iq_request_list(oct, iq, 0);
                        spin_unlock_bh(&iq->lock);
                }
        }

        /* Force all pending ordered list requests to time out. */
        lio_process_ordered_list(oct, 1);

        /* We do not need to wait for output queue packets to be processed. */
}

/**
 * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
 * @dev: Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
        u32 status, mask;
        int pos = 0x100;

        pr_info("%s :\n", __func__);

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
        if (dev->error_state == pci_channel_io_normal)
                status &= ~mask; /* Clear corresponding nonfatal bits */
        else
                status &= mask; /* Clear corresponding fatal bits */
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

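/*
 * Illustrative sketch (hypothetical helper): with severity mask M from
 * PCI_ERR_UNCOR_SEVER, a recovering (io_normal) channel clears only the
 * bits not marked fatal (status & ~M), while a fatal channel clears the
 * fatal bits (status & M), mirroring the masking above.
 */
static u32 __maybe_unused aer_bits_to_clear_example(u32 status, u32 sever,
                                                    bool io_normal)
{
        return io_normal ? (status & ~sever) : (status & sever);
}
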
/**
 * stop_pci_io - Stop all PCI IO to a given device
 * @oct: Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
        struct msix_entry *msix_entries;
        int i;

        /* No more instructions will be forwarded. */
        atomic_set(&oct->status, OCT_DEV_IN_RESET);

        for (i = 0; i < oct->ifcount; i++)
                netif_device_detach(oct->props[i].netdev);

        /* Disable interrupts */
        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

        pcierror_quiesce_device(oct);
        if (oct->msix_on) {
                msix_entries = (struct msix_entry *)oct->msix_entries;
                for (i = 0; i < oct->num_msix_irqs; i++) {
                        /* clear the affinity_cpumask */
                        irq_set_affinity_hint(msix_entries[i].vector,
                                              NULL);
                        free_irq(msix_entries[i].vector,
                                 &oct->ioq_vector[i]);
                }
                pci_disable_msix(oct->pci_dev);
                kfree(oct->msix_entries);
                oct->msix_entries = NULL;
                octeon_free_ioq_vector(oct);
        }
        dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                lio_get_state_string(&oct->status));

        /* making it a common function for all OCTEON models */
        cleanup_aer_uncorrect_error_status(oct->pci_dev);

        pci_disable_device(oct->pci_dev);
}

/**
 * liquidio_pcie_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
                                                     pci_channel_state_t state)
{
        struct octeon_device *oct = pci_get_drvdata(pdev);

        /* Non-correctable Non-fatal errors */
        if (state == pci_channel_io_normal) {
                dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
                cleanup_aer_uncorrect_error_status(oct->pci_dev);
                return PCI_ERS_RESULT_CAN_RECOVER;
        }

        /* Non-correctable Fatal errors */
        dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
        stop_pci_io(oct);

        return PCI_ERS_RESULT_DISCONNECT;
}

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_vf_err_handler = {
        .error_detected = liquidio_pcie_error_detected,
};

static const struct pci_device_id liquidio_vf_pci_tbl[] = {
        {
                PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
                PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {
                0, 0, 0, 0, 0, 0, 0
        }
};
MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl);

static struct pci_driver liquidio_vf_pci_driver = {
        .name           = "LiquidIO_VF",
        .id_table       = liquidio_vf_pci_tbl,
        .probe          = liquidio_vf_probe,
        .remove         = liquidio_vf_remove,
        .err_handler    = &liquidio_vf_err_handler,    /* For AER */
};

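/*
 * Not shown in this excerpt: the full file supplies its own module
 * init/exit. A minimal registration of the table above would look like
 * the following (illustrative only, hence compiled out):
 */
#if 0
module_pci_driver(liquidio_vf_pci_driver);
#endif
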
/**
 * print_link_info - Print link information
 * @netdev: network device
 */
static void print_link_info(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
            ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
                struct oct_link_info *linfo = &lio->linfo;

                if (linfo->link.s.link_up) {
                        netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
                                   linfo->link.s.speed,
                                   (linfo->link.s.duplex) ? "Full" : "Half");
                } else {
                        netif_info(lio, link, lio->netdev, "Link Down\n");
                }
        }
}

/**
 * octnet_link_status_change - Routine to notify MTU change
 * @work: work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;

        /* lio->linfo.link.s.mtu always contains the max MTU of the lio
         * interface. This work function is invoked only when the new max
         * MTU of the interface is less than the current MTU.
         */
        rtnl_lock();
        dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
        rtnl_unlock();
}

/**
 * setup_link_status_change_wq - Sets up the mtu status change work
 * @netdev: network device
 */
static int setup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->link_status_wq.wq = alloc_workqueue("link-status",
                                                 WQ_MEM_RECLAIM, 0);
        if (!lio->link_status_wq.wq) {
                dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
                return -1;
        }
        INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
                          octnet_link_status_change);
        lio->link_status_wq.wk.ctxptr = lio;

        return 0;
}

static void cleanup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (lio->link_status_wq.wq) {
                cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
                destroy_workqueue(lio->link_status_wq.wq);
        }
}

/**
 * update_link_status - Update link status
 * @netdev: network device
 * @ls: link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static void update_link_status(struct net_device *netdev,
                               union oct_link_status *ls)
{
        struct lio *lio = GET_LIO(netdev);
        int current_max_mtu = lio->linfo.link.s.mtu;
        struct octeon_device *oct = lio->oct_dev;

        if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
                lio->linfo.link.u64 = ls->u64;

                print_link_info(netdev);
                lio->link_changes++;

                if (lio->linfo.link.s.link_up) {
                        netif_carrier_on(netdev);
                        wake_txqs(netdev);
                } else {
                        netif_carrier_off(netdev);
                        stop_txqs(netdev);
                }

                if (lio->linfo.link.s.mtu != current_max_mtu) {
                        dev_info(&oct->pci_dev->dev,
                                 "Max MTU Changed from %d to %d\n",
                                 current_max_mtu, lio->linfo.link.s.mtu);
                        netdev->max_mtu = lio->linfo.link.s.mtu;
                }

                if (lio->linfo.link.s.mtu < netdev->mtu) {
                        dev_warn(&oct->pci_dev->dev,
                                 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
                                 netdev->mtu, lio->linfo.link.s.mtu);
                        queue_delayed_work(lio->link_status_wq.wq,
                                           &lio->link_status_wq.wk.work, 0);
                }
        }
}

/**
 * liquidio_vf_probe - PCI probe handler
 * @pdev: PCI device structure
 * @ent: unused
 */
static int
liquidio_vf_probe(struct pci_dev *pdev,
                  const struct pci_device_id __maybe_unused *ent)
{
        struct octeon_device *oct_dev = NULL;

        oct_dev = octeon_allocate_device(pdev->device,
                                         sizeof(struct octeon_device_priv));

        if (!oct_dev) {
                dev_err(&pdev->dev, "Unable to allocate device\n");
                return -ENOMEM;
        }
        oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

        dev_info(&pdev->dev, "Initializing device %x:%x.\n",
                 (u32)pdev->vendor, (u32)pdev->device);

        /* Assign octeon_device for this device to the private data area. */
        pci_set_drvdata(pdev, oct_dev);

        /* set linux specific device pointer */
        oct_dev->pci_dev = pdev;

        oct_dev->subsystem_id = pdev->subsystem_vendor |
                (pdev->subsystem_device << 16);

        if (octeon_device_init(oct_dev)) {
                liquidio_vf_remove(pdev);
                return -ENOMEM;
        }

        dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

        return 0;
}

/**
 * octeon_pci_flr - PCI FLR for each Octeon device.
 * @oct: octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
        pci_save_state(oct->pci_dev);

        pci_cfg_access_lock(oct->pci_dev);

        /* Quiesce the device completely */
        pci_write_config_word(oct->pci_dev, PCI_COMMAND,
                              PCI_COMMAND_INTX_DISABLE);

        pcie_flr(oct->pci_dev);

        pci_cfg_access_unlock(oct->pci_dev);

        pci_restore_state(oct->pci_dev);
}

/**
 * octeon_destroy_resources - Destroy resources associated with octeon device
 * @oct: octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        struct msix_entry *msix_entries;
        int i;

        switch (atomic_read(&oct->status)) {
        case OCT_DEV_RUNNING:
        case OCT_DEV_CORE_OK:
                /* No more instructions will be forwarded. */
                atomic_set(&oct->status, OCT_DEV_IN_RESET);

                oct->app_mode = CVM_DRV_INVALID_APP;
                dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                        lio_get_state_string(&oct->status));

                schedule_timeout_uninterruptible(HZ / 10);

                fallthrough;
        case OCT_DEV_HOST_OK:
        case OCT_DEV_IO_QUEUES_DONE:
                if (lio_wait_for_instr_fetch(oct))
                        dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

                if (wait_for_pending_requests(oct))
                        dev_err(&oct->pci_dev->dev, "There were pending requests\n");

                /* Disable the input and output queues now. No more packets will
                 * arrive from Octeon, but we should wait for all packet
                 * processing to finish.
                 */
                oct->fn_list.disable_io_queues(oct);

                if (lio_wait_for_oq_pkts(oct))
                        dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

                /* Force all requests waiting to be fetched by OCTEON to
                 * complete.
                 */
                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        struct octeon_instr_queue *iq;

                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
                                continue;
                        iq = oct->instr_queue[i];

                        if (atomic_read(&iq->instr_pending)) {
                                spin_lock_bh(&iq->lock);
                                iq->fill_cnt = 0;
                                iq->octeon_read_index = iq->host_write_index;
                                iq->stats.instr_processed +=
                                        atomic_read(&iq->instr_pending);
                                lio_process_iq_request_list(oct, iq, 0);
                                spin_unlock_bh(&iq->lock);
                        }
                }

                lio_process_ordered_list(oct, 1);
                octeon_free_sc_done_list(oct);
                octeon_free_sc_zombie_list(oct);

                fallthrough;
        case OCT_DEV_INTR_SET_DONE:
                /* Disable interrupts */
                oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

                if (oct->msix_on) {
                        msix_entries = (struct msix_entry *)oct->msix_entries;
                        for (i = 0; i < oct->num_msix_irqs; i++) {
                                if (oct->ioq_vector[i].vector) {
                                        irq_set_affinity_hint(
                                                        msix_entries[i].vector,
                                                        NULL);
                                        free_irq(msix_entries[i].vector,
                                                 &oct->ioq_vector[i]);
                                        oct->ioq_vector[i].vector = 0;
                                }
                        }
                        pci_disable_msix(oct->pci_dev);
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
                        kfree(oct->irq_name_storage);
                        oct->irq_name_storage = NULL;
                }
                /* Soft reset the octeon device before exiting */
                if (!pcie_reset_flr(oct->pci_dev, PCI_RESET_PROBE))
                        octeon_pci_flr(oct);
                else
                        cn23xx_vf_ask_pf_to_do_flr(oct);

                fallthrough;
        case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
                octeon_free_ioq_vector(oct);

                fallthrough;
        case OCT_DEV_MBOX_SETUP_DONE:
                oct->fn_list.free_mbox(oct);

                fallthrough;
        case OCT_DEV_IN_RESET:
        case OCT_DEV_DROQ_INIT_DONE:
                mdelay(100);
                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        octeon_delete_droq(oct, i);
                }

                fallthrough;
        case OCT_DEV_RESP_LIST_INIT_DONE:
                octeon_delete_response_list(oct);

                fallthrough;
        case OCT_DEV_INSTR_QUEUE_INIT_DONE:
                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
                                continue;
                        octeon_delete_instr_queue(oct, i);
                }

                fallthrough;
        case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
                octeon_free_sc_buffer_pool(oct);

                fallthrough;
        case OCT_DEV_DISPATCH_INIT_DONE:
                octeon_delete_dispatch_list(oct);
                cancel_delayed_work_sync(&oct->nic_poll_work.work);

                fallthrough;
        case OCT_DEV_PCI_MAP_DONE:
                octeon_unmap_pci_barx(oct, 0);
                octeon_unmap_pci_barx(oct, 1);

                fallthrough;
        case OCT_DEV_PCI_ENABLE_DONE:
                pci_clear_master(oct->pci_dev);
                /* Disable the device, releasing the PCI INT */
                pci_disable_device(oct->pci_dev);

                fallthrough;
        case OCT_DEV_BEGIN_STATE:
                /* Nothing to be done here either */
                break;
        }

        tasklet_kill(&oct_priv->droq_tasklet);
}

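/*
 * Illustrative sketch (hypothetical, not driver code): the switch above is
 * the usual "teardown state machine" idiom. Init records how far it got in
 * oct->status; teardown enters the switch at that state and falls through,
 * undoing each completed step in reverse order of initialization.
 */
static void __maybe_unused teardown_idiom_example(int progress)
{
        switch (progress) {
        case 2:
                /* undo the most recently completed init step first */
                fallthrough;
        case 1:
                /* then the step before it */
                fallthrough;
        case 0:
                /* nothing (left) to undo */
                break;
        }
}
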
/**
 * send_rx_ctrl_cmd - Send Rx control command
 * @lio: per-network private data
 * @start_stop: whether to start or stop
 */
static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
        struct octeon_soft_command *sc;
        union octnet_cmd *ncmd;
        int retval;

        if (oct->props[lio->ifidx].rx_on == start_stop)
                return 0;

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
                                          16, 0);
        if (!sc) {
                netif_info(lio, rx_err, lio->netdev,
                           "Failed to allocate octeon_soft_command struct\n");
                return -ENOMEM;
        }

        ncmd = (union octnet_cmd *)sc->virtdptr;

        ncmd->u64 = 0;
        ncmd->s.cmd = OCTNET_CMD_RX_CTL;
        ncmd->s.param1 = start_stop;

        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;

        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_CMD, 0, 0, 0);

        init_completion(&sc->complete);
        sc->sc_status = OCTEON_REQUEST_PENDING;

        retval = octeon_send_soft_command(oct, sc);
        if (retval == IQ_SEND_FAILED) {
                netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
                octeon_free_soft_command(oct, sc);
        } else {
                /* Sleep on a wait queue till the cond flag indicates that the
                 * response arrived or timed-out.
                 */
                retval = wait_for_sc_completion_timeout(oct, sc, 0);
                if (retval)
                        return retval;

                oct->props[lio->ifidx].rx_on = start_stop;
                WRITE_ONCE(sc->caller_is_done, true);
        }

        return retval;
}

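/*
 * The function above follows the driver-wide soft-command lifecycle:
 * octeon_alloc_soft_command() -> fill the command -> init_completion() ->
 * octeon_send_soft_command() -> wait_for_sc_completion_timeout(). On
 * success the caller sets sc->caller_is_done so the response manager may
 * free the command; on IQ_SEND_FAILED the caller frees it directly.
 */
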
/**
 * liquidio_destroy_nic_device - Destroy NIC device interface
 * @oct: octeon device
 * @ifidx: which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
        struct net_device *netdev = oct->props[ifidx].netdev;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        struct napi_struct *napi, *n;
        struct lio *lio;

        if (!netdev) {
                dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
                        __func__, ifidx);
                return;
        }

        lio = GET_LIO(netdev);

        dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
                liquidio_stop(netdev);

        if (oct->props[lio->ifidx].napi_enabled == 1) {
                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                        napi_disable(napi);

                oct->props[lio->ifidx].napi_enabled = 0;

                oct->droq[0]->ops.poll_mode = 0;
        }

        /* Delete NAPI */
        list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                netif_napi_del(napi);

        tasklet_enable(&oct_priv->droq_tasklet);

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
                unregister_netdev(netdev);

        cleanup_rx_oom_poll_fn(netdev);

        cleanup_link_status_change_wq(netdev);

        lio_delete_glists(lio);

        free_netdev(netdev);

        oct->props[ifidx].gmxport = -1;

        oct->props[ifidx].netdev = NULL;
}

/**
 * liquidio_stop_nic_module - Stop complete NIC functionality
 * @oct: octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
        struct lio *lio;
        int i, j;

        dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
        if (!oct->ifcount) {
                dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
                return 1;
        }

        spin_lock_bh(&oct->cmd_resp_wqlock);
        oct->cmd_resp_state = OCT_DRV_OFFLINE;
        spin_unlock_bh(&oct->cmd_resp_wqlock);

        for (i = 0; i < oct->ifcount; i++) {
                lio = GET_LIO(oct->props[i].netdev);
                for (j = 0; j < oct->num_oqs; j++)
                        octeon_unregister_droq_ops(oct,
                                                   lio->linfo.rxpciq[j].s.q_no);
        }

        for (i = 0; i < oct->ifcount; i++)
                liquidio_destroy_nic_device(oct, i);

        dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
        return 0;
}

/**
 * liquidio_vf_remove - Cleans up resources at unload time
 * @pdev: PCI device structure
 */
static void liquidio_vf_remove(struct pci_dev *pdev)
{
        struct octeon_device *oct_dev = pci_get_drvdata(pdev);

        dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

        if (oct_dev->app_mode == CVM_DRV_NIC_APP)
                liquidio_stop_nic_module(oct_dev);

        /* Reset the octeon device and cleanup all memory allocated for
         * the octeon device by driver.
         */
        octeon_destroy_resources(oct_dev);

        dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

        /* This octeon device has been removed. Update the global
         * data structure to reflect this. Free the device structure.
         */
        octeon_free_device_mem(oct_dev);
}

/**
 * octeon_pci_os_setup - PCI initialization for each Octeon device.
 * @oct: octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
#ifdef CONFIG_PCI_IOV
        /* setup PCI stuff first */
        if (!oct->pci_dev->physfn)
                octeon_pci_flr(oct);
#endif

        if (pci_enable_device(oct->pci_dev)) {
                dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
                return 1;
        }

        if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
                dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
                pci_disable_device(oct->pci_dev);
                return 1;
        }

        /* Enable PCI DMA Master. */
        pci_set_master(oct->pci_dev);

        return 0;
}

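/*
 * Illustrative note (generic idiom, not this driver's behavior): drivers
 * that can also operate with 32-bit DMA often retry with a smaller mask,
 * e.g.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
 *		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *
 * The CN23XX VF requires 64-bit DMA, so the function above fails hard.
 */
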
/**
 * free_netbuf - Unmap and free network buffer
 * @buf: buffer
 */
static void free_netbuf(void *buf)
{
        struct octnet_buf_free_info *finfo;
        struct sk_buff *skb;
        struct lio *lio;

        finfo = (struct octnet_buf_free_info *)buf;
        skb = finfo->skb;
        lio = finfo->lio;

        dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
                         DMA_TO_DEVICE);

        tx_buffer_free(skb);
}

/**
 * free_netsgbuf - Unmap and free gather buffer
 * @buf: buffer
 */
static void free_netsgbuf(void *buf)
{
        struct octnet_buf_free_info *finfo;
        struct octnic_gather *g;
        struct sk_buff *skb;
        int i, frags, iq;
        struct lio *lio;

        finfo = (struct octnet_buf_free_info *)buf;
        skb = finfo->skb;
        lio = finfo->lio;
        g = finfo->g;
        frags = skb_shinfo(skb)->nr_frags;

        dma_unmap_single(&lio->oct_dev->pci_dev->dev,
                         g->sg[0].ptr[0], (skb->len - skb->data_len),
                         DMA_TO_DEVICE);

        i = 1;
        while (frags--) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

                dma_unmap_page(&lio->oct_dev->pci_dev->dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
                               skb_frag_size(frag), DMA_TO_DEVICE);
                i++;
        }

        iq = skb_iq(lio->oct_dev, skb);

        spin_lock(&lio->glist_lock[iq]);
        list_add_tail(&g->list, &lio->glist[iq]);
        spin_unlock(&lio->glist_lock[iq]);

        tx_buffer_free(skb);
}

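/*
 * Note on the indexing above: each gather-list entry packs four DMA
 * pointers, so buffer i lives at g->sg[i >> 2].ptr[i & 3]. Slot 0 of
 * entry 0 holds the skb's linear data; page fragments follow from i == 1.
 */
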
/**
 * free_netsgbuf_with_resp - Unmap and free gather buffer with response
 * @buf: buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
        struct octnet_buf_free_info *finfo;
        struct octeon_soft_command *sc;
        struct octnic_gather *g;
        struct sk_buff *skb;
        int i, frags, iq;
        struct lio *lio;

        sc = (struct octeon_soft_command *)buf;
        skb = (struct sk_buff *)sc->callback_arg;
        finfo = (struct octnet_buf_free_info *)&skb->cb;

        lio = finfo->lio;
        g = finfo->g;
        frags = skb_shinfo(skb)->nr_frags;

        dma_unmap_single(&lio->oct_dev->pci_dev->dev,
                         g->sg[0].ptr[0], (skb->len - skb->data_len),
                         DMA_TO_DEVICE);

        i = 1;
        while (frags--) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

                dma_unmap_page(&lio->oct_dev->pci_dev->dev,
                               g->sg[(i >> 2)].ptr[(i & 3)],
                               skb_frag_size(frag), DMA_TO_DEVICE);
                i++;
        }

        iq = skb_iq(lio->oct_dev, skb);

        spin_lock(&lio->glist_lock[iq]);
        list_add_tail(&g->list, &lio->glist[iq]);
        spin_unlock(&lio->glist_lock[iq]);

        /* Don't free the skb yet */
}

/**
 * liquidio_open - Net device open for LiquidIO
 * @netdev: network device
 */
static int liquidio_open(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        struct napi_struct *napi, *n;
        int ret = 0;

        if (!oct->props[lio->ifidx].napi_enabled) {
                tasklet_disable(&oct_priv->droq_tasklet);

                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                        napi_enable(napi);

                oct->props[lio->ifidx].napi_enabled = 1;

                oct->droq[0]->ops.poll_mode = 1;
        }

        ifstate_set(lio, LIO_IFSTATE_RUNNING);

        /* Ready for link status updates */
        lio->intf_open = 1;

        netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
        start_txqs(netdev);

        INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
        lio->stats_wk.ctxptr = lio;
        schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
                                        (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));

        /* tell Octeon to start forwarding packets to host */
        ret = send_rx_ctrl_cmd(lio, 1);
        if (ret)
                return ret;

        dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);

        return ret;
}

/**
 * liquidio_stop - Net device stop for LiquidIO
 * @netdev: network device
 */
static int liquidio_stop(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        struct napi_struct *napi, *n;
        int ret = 0;

        /* tell Octeon to stop forwarding packets to host */
        ret = send_rx_ctrl_cmd(lio, 0);
        if (ret)
                return ret;

        netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
        /* Inform that netif carrier is down */
        lio->intf_open = 0;
        lio->linfo.link.s.link_up = 0;

        netif_carrier_off(netdev);
        lio->link_changes++;

        ifstate_reset(lio, LIO_IFSTATE_RUNNING);

        stop_txqs(netdev);

        /* Wait for any pending Rx descriptors */
        if (lio_wait_for_clean_oq(oct))
                netif_info(lio, rx_err, lio->netdev,
                           "Proceeding with stop interface after partial RX desc processing\n");

        if (oct->props[lio->ifidx].napi_enabled == 1) {
                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                        napi_disable(napi);

                oct->props[lio->ifidx].napi_enabled = 0;

                oct->droq[0]->ops.poll_mode = 0;

                tasklet_enable(&oct_priv->droq_tasklet);
        }

        cancel_delayed_work_sync(&lio->stats_wk.work);

        dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

        return ret;
}

/**
 * get_new_flags - Convert net device flags to an octnet_ifflags mask
 * @netdev: network device
 *
 * This routine generates an octnet_ifflags mask from the net device flags
 * received from the OS.
 */
static enum octnet_ifflags get_new_flags(struct net_device *netdev)
{
        enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;

        if (netdev->flags & IFF_PROMISC)
                f |= OCTNET_IFFLAG_PROMISC;

        if (netdev->flags & IFF_ALLMULTI)
                f |= OCTNET_IFFLAG_ALLMULTI;

        if (netdev->flags & IFF_MULTICAST) {
                f |= OCTNET_IFFLAG_MULTICAST;

                /* Accept all multicast addresses if there are more than we
                 * can handle
                 */
                if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
                        f |= OCTNET_IFFLAG_ALLMULTI;
        }

        if (netdev->flags & IFF_BROADCAST)
                f |= OCTNET_IFFLAG_BROADCAST;

        return f;
}

static void liquidio_set_uc_list(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct octnic_ctrl_pkt nctrl;
        struct netdev_hw_addr *ha;
        u64 *mac;

        if (lio->netdev_uc_count == netdev_uc_count(netdev))
                return;

        if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) {
                dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n");
                return;
        }

        lio->netdev_uc_count = netdev_uc_count(netdev);

        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST;
        nctrl.ncmd.s.more = lio->netdev_uc_count;
        nctrl.ncmd.s.param1 = oct->vf_num;
        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
        nctrl.netpndev = (u64)netdev;
        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

        /* copy all the addresses into the udd */
        mac = &nctrl.udd[0];
        netdev_for_each_uc_addr(ha, netdev) {
                ether_addr_copy(((u8 *)mac) + 2, ha->addr);
                mac++;
        }

        octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
}

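/*
 * Layout note for the copy above: each 64-bit udd word carries one MAC
 * address in the last six of its eight bytes (hence the "+ 2" offset),
 * and ncmd.s.more tells the firmware how many such words follow.
 */
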
/**
 * liquidio_set_mcast_list - Net device set_multicast_list
 * @netdev: network device
 */
static void liquidio_set_mcast_list(struct net_device *netdev)
{
        int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct octnic_ctrl_pkt nctrl;
        struct netdev_hw_addr *ha;
        u64 *mc;
        int ret;

        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

        /* Create a ctrl pkt command to be sent to core app. */
        nctrl.ncmd.u64 = 0;
        nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
        nctrl.ncmd.s.param1 = get_new_flags(netdev);
        nctrl.ncmd.s.param2 = mc_count;
        nctrl.ncmd.s.more = mc_count;
        nctrl.netpndev = (u64)netdev;
        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

        /* copy all the addresses into the udd */
        mc = &nctrl.udd[0];
        netdev_for_each_mc_addr(ha, netdev) {
                *mc = 0;
                ether_addr_copy(((u8 *)mc) + 2, ha->addr);
                /* no need to swap bytes */
                if (++mc > &nctrl.udd[mc_count])
                        break;
        }

        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;

        /* Apparently, any activity in this call from the kernel has to
         * be atomic. So we won't wait for response.
         */

        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
        if (ret) {
                dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
                        ret);
        }

        liquidio_set_uc_list(netdev);
}

/**
 * liquidio_set_mac - Net device set_mac_address
 * @netdev: network device
 * @p: opaque pointer to sockaddr
 */
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
        struct sockaddr *addr = (struct sockaddr *)p;
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;
        struct octnic_ctrl_pkt nctrl;
        int ret = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
                return 0;

        if (lio->linfo.macaddr_is_admin_asgnd)
                return -EPERM;

        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

        nctrl.ncmd.u64 = 0;
        nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
        nctrl.ncmd.s.param1 = 0;
        nctrl.ncmd.s.more = 1;
        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
        nctrl.netpndev = (u64)netdev;

        nctrl.udd[0] = 0;
        /* The MAC Address is presented in network byte order. */
        ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data);

        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
        if (ret < 0) {
                dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
                return -ENOMEM;
        }

        if (nctrl.sc_status ==
            FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) {
                dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n");
                return -EPERM;
        }

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);

        return 0;
}

static void
liquidio_get_stats64(struct net_device *netdev,
                     struct rtnl_link_stats64 *lstats)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct;
        u64 pkts = 0, drop = 0, bytes = 0;
        struct oct_droq_stats *oq_stats;
        struct oct_iq_stats *iq_stats;
        int i, iq_no, oq_no;

        oct = lio->oct_dev;

        if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
                return;

        for (i = 0; i < oct->num_iqs; i++) {
                iq_no = lio->linfo.txpciq[i].s.q_no;
                iq_stats = &oct->instr_queue[iq_no]->stats;
                pkts += iq_stats->tx_done;
                drop += iq_stats->tx_dropped;
                bytes += iq_stats->tx_tot_bytes;
        }

        lstats->tx_packets = pkts;
        lstats->tx_bytes = bytes;
        lstats->tx_dropped = drop;

        pkts = 0;
        drop = 0;
        bytes = 0;

        for (i = 0; i < oct->num_oqs; i++) {
                oq_no = lio->linfo.rxpciq[i].s.q_no;
                oq_stats = &oct->droq[oq_no]->stats;
                pkts += oq_stats->rx_pkts_received;
                drop += (oq_stats->rx_dropped +
                         oq_stats->dropped_nodispatch +
                         oq_stats->dropped_toomany +
                         oq_stats->dropped_nomem);
                bytes += oq_stats->rx_bytes_received;
        }

        lstats->rx_bytes = bytes;
        lstats->rx_packets = pkts;
        lstats->rx_dropped = drop;

        lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;

        /* detailed rx_errors: */
        lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
        /* received packet with CRC error */
        lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
        /* received frame alignment error */
        lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;

        lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
                            lstats->rx_frame_errors;

        /* detailed tx_errors */
        lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
        lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;

        lstats->tx_errors = lstats->tx_aborted_errors +
                lstats->tx_carrier_errors;
}

/**
 * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
 * @netdev: network device
 * @ifr: interface request
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
        struct lio *lio = GET_LIO(netdev);
        struct hwtstamp_config conf;

        if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
                return -EFAULT;

        if (conf.flags)
                return -EINVAL;

        switch (conf.tx_type) {
        case HWTSTAMP_TX_ON:
        case HWTSTAMP_TX_OFF:
                break;
        default:
                return -ERANGE;
        }

        switch (conf.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                break;
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_SOME:
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
        case HWTSTAMP_FILTER_NTP_ALL:
                conf.rx_filter = HWTSTAMP_FILTER_ALL;
                break;
        default:
                return -ERANGE;
        }

        if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
                ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
        else
                ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

        return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
}

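/*
 * Illustrative userspace sketch of the request this handler serves
 * (hypothetical snippet; "eth0" and fd are placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { .ifr_data = (char *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 */
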
/**
 * liquidio_ioctl - ioctl handler
 * @netdev: network device
 * @ifr: interface request
 * @cmd: command
 */
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
        switch (cmd) {
        case SIOCSHWTSTAMP:
                return hwtstamp_ioctl(netdev, ifr);
        default:
                return -EOPNOTSUPP;
        }
}

static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
{
        struct sk_buff *skb = (struct sk_buff *)buf;
        struct octnet_buf_free_info *finfo;
        struct oct_timestamp_resp *resp;
        struct octeon_soft_command *sc;
        struct lio *lio;

        finfo = (struct octnet_buf_free_info *)skb->cb;
        lio = finfo->lio;
        sc = finfo->sc;
        oct = lio->oct_dev;
        resp = (struct oct_timestamp_resp *)sc->virtrptr;

        if (status != OCTEON_REQUEST_DONE) {
                dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
                        CVM_CAST64(status));
                resp->timestamp = 0;
        }

        octeon_swap_8B_data(&resp->timestamp, 1);

        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
                struct skb_shared_hwtstamps ts;
                u64 ns = resp->timestamp;

                netif_info(lio, tx_done, lio->netdev,
                           "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
                           skb, (unsigned long long)ns);
                ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
                skb_tstamp_tx(skb, &ts);
        }

        octeon_free_soft_command(oct, sc);
        tx_buffer_free(skb);
}

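/*
 * Once skb_tstamp_tx() runs above, the kernel clones the skb and queues
 * it on the socket's error queue, where the application retrieves the
 * hardware transmit timestamp via MSG_ERRQUEUE.
 */
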
/* send_nic_timestamp_pkt - Send a data packet that will be timestamped
 * @oct: octeon device
 * @ndata: pointer to network data
 * @finfo: pointer to private network data
 * @xmit_more: whether more packets are queued behind this one
 */
static int send_nic_timestamp_pkt(struct octeon_device *oct,
                                  struct octnic_data_pkt *ndata,
                                  struct octnet_buf_free_info *finfo,
                                  int xmit_more)
{
        struct octeon_soft_command *sc;
        int ring_doorbell;
        struct lio *lio;
        int retval;
        u32 len;

        lio = finfo->lio;

        sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
                                            sizeof(struct oct_timestamp_resp));
        finfo->sc = sc;

        if (!sc) {
                dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
                return IQ_SEND_FAILED;
        }

        if (ndata->reqtype == REQTYPE_NORESP_NET)
                ndata->reqtype = REQTYPE_RESP_NET;
        else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
                ndata->reqtype = REQTYPE_RESP_NET_SG;

        sc->callback = handle_timestamp;
        sc->callback_arg = finfo->skb;
        sc->iq_no = ndata->q_no;

        len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;

        ring_doorbell = !xmit_more;

        retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
                                     sc, len, ndata->reqtype);

        if (retval == IQ_SEND_FAILED) {
                dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
                        retval);
                octeon_free_soft_command(oct, sc);
        } else {
                netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
        }

        return retval;
}

/**
 * liquidio_xmit - Transmit network packets to the Octeon interface
 * @skb: skbuff struct to be passed to network layer.
 * @netdev: pointer to network device
 * @returns whether the packet was transmitted to the device okay or not
 *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
1415 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
1416 {
1417         struct octnet_buf_free_info *finfo;
1418         union octnic_cmd_setup cmdsetup;
1419         struct octnic_data_pkt ndata;
1420         struct octeon_instr_irh *irh;
1421         struct oct_iq_stats *stats;
1422         struct octeon_device *oct;
1423         int q_idx = 0, iq_no = 0;
1424         union tx_info *tx_info;
1425         int xmit_more = 0;
1426         struct lio *lio;
1427         int status = 0;
1428         u64 dptr = 0;
1429         u32 tag = 0;
1430         int j;
1431
1432         lio = GET_LIO(netdev);
1433         oct = lio->oct_dev;
1434
1435         q_idx = skb_iq(lio->oct_dev, skb);
1436         tag = q_idx;
1437         iq_no = lio->linfo.txpciq[q_idx].s.q_no;
1438
1439         stats = &oct->instr_queue[iq_no]->stats;
1440
1441         /* Check for all conditions in which the current packet cannot be
1442          * transmitted.
1443          */
1444         if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
1445             (!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
1446                 netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
1447                            lio->linfo.link.s.link_up);
1448                 goto lio_xmit_failed;
1449         }
1450
1451         /* Use space in skb->cb to store info used to unmap and
1452          * free the buffers.
1453          */
1454         finfo = (struct octnet_buf_free_info *)skb->cb;
1455         finfo->lio = lio;
1456         finfo->skb = skb;
1457         finfo->sc = NULL;
1458
1459         /* Prepare the attributes for the data to be passed to OSI. */
1460         memset(&ndata, 0, sizeof(struct octnic_data_pkt));
1461
1462         ndata.buf = finfo;
1463
1464         ndata.q_no = iq_no;
1465
1466         if (octnet_iq_is_full(oct, ndata.q_no)) {
1467                 /* defer sending if queue is full */
1468                 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
1469                            ndata.q_no);
1470                 stats->tx_iq_busy++;
1471                 return NETDEV_TX_BUSY;
1472         }
1473
1474         ndata.datasize = skb->len;
1475
1476         cmdsetup.u64 = 0;
1477         cmdsetup.s.iq_no = iq_no;
1478
1479         if (skb->ip_summed == CHECKSUM_PARTIAL) {
1480                 if (skb->encapsulation) {
1481                         cmdsetup.s.tnl_csum = 1;
1482                         stats->tx_vxlan++;
1483                 } else {
1484                         cmdsetup.s.transport_csum = 1;
1485                 }
1486         }
1487         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
1488                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1489                 cmdsetup.s.timestamp = 1;
1490         }
1491
1492         if (!skb_shinfo(skb)->nr_frags) {
1493                 cmdsetup.s.u.datasize = skb->len;
1494                 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
1495                 /* Offload checksum calculation for TCP/UDP packets */
1496                 dptr = dma_map_single(&oct->pci_dev->dev,
1497                                       skb->data,
1498                                       skb->len,
1499                                       DMA_TO_DEVICE);
1500                 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
1501                         dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
1502                                 __func__);
1503                         return NETDEV_TX_BUSY;
1504                 }
1505
1506                 ndata.cmd.cmd3.dptr = dptr;
1507                 finfo->dptr = dptr;
1508                 ndata.reqtype = REQTYPE_NORESP_NET;
1509
1510         } else {
1511                 skb_frag_t *frag;
1512                 struct octnic_gather *g;
1513                 int i, frags;
1514
1515                 spin_lock(&lio->glist_lock[q_idx]);
1516                 g = (struct octnic_gather *)
1517                         lio_list_delete_head(&lio->glist[q_idx]);
1518                 spin_unlock(&lio->glist_lock[q_idx]);
1519
1520                 if (!g) {
1521                         netif_info(lio, tx_err, lio->netdev,
1522                                    "Transmit scatter gather: glist null!\n");
1523                         goto lio_xmit_failed;
1524                 }
1525
1526                 cmdsetup.s.gather = 1;
1527                 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
1528                 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
1529
1530                 memset(g->sg, 0, g->sg_size);
1531
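                /* Map the linear part of the skb (skb_headlen(), i.e.
                 * skb->len - skb->data_len) into slot 0 of the first
                 * scatter/gather entry.
                 */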
1532                 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
1533                                                  skb->data,
1534                                                  (skb->len - skb->data_len),
1535                                                  DMA_TO_DEVICE);
1536                 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
1537                         dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
1538                                 __func__);
1539                         return NETDEV_TX_BUSY;
1540                 }
1541                 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
1542
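                /* Each octeon scatter/gather entry holds four pointers, so
                 * pointer i goes into entry (i >> 2), slot (i & 3); e.g.
                 * i == 5 lands in g->sg[1].ptr[1].  Slot 0 of entry 0
                 * already holds the linear data mapped above.
                 */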
1543                 frags = skb_shinfo(skb)->nr_frags;
1544                 i = 1;
1545                 while (frags--) {
1546                         frag = &skb_shinfo(skb)->frags[i - 1];
1547
1548                         g->sg[(i >> 2)].ptr[(i & 3)] =
1549                                 skb_frag_dma_map(&oct->pci_dev->dev,
1550                                                  frag, 0, skb_frag_size(frag),
1551                                                  DMA_TO_DEVICE);
1552                         if (dma_mapping_error(&oct->pci_dev->dev,
1553                                               g->sg[i >> 2].ptr[i & 3])) {
1554                                 dma_unmap_single(&oct->pci_dev->dev,
1555                                                  g->sg[0].ptr[0],
1556                                                  skb->len - skb->data_len,
1557                                                  DMA_TO_DEVICE);
1558                                 for (j = 1; j < i; j++) {
1559                                         frag = &skb_shinfo(skb)->frags[j - 1];
1560                                         dma_unmap_page(&oct->pci_dev->dev,
1561                                                        g->sg[j >> 2].ptr[j & 3],
1562                                                        skb_frag_size(frag),
1563                                                        DMA_TO_DEVICE);
1564                                 }
1565                                 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
1566                                         __func__);
1567                                 return NETDEV_TX_BUSY;
1568                         }
1569
1570                         add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
1571                                     (i & 3));
1572                         i++;
1573                 }
1574
1575                 dptr = g->sg_dma_ptr;
1576
1577                 ndata.cmd.cmd3.dptr = dptr;
1578                 finfo->dptr = dptr;
1579                 finfo->g = g;
1580
1581                 ndata.reqtype = REQTYPE_NORESP_NET_SG;
1582         }
1583
1584         irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
1585         tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
1586
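        /* For TSO, pass the segment size and count to the firmware in the
         * opcode-specific word so it can perform the segmentation.
         */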
1587         if (skb_shinfo(skb)->gso_size) {
1588                 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
1589                 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
1590         }
1591
1592         /* HW insert VLAN tag */
1593         if (skb_vlan_tag_present(skb)) {
1594                 irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
1595                 irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
1596         }
1597
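        /* netdev_xmit_more() indicates that the stack has more packets
         * queued for this queue; the send path uses it to defer ringing
         * the doorbell and batch several commands per HW notification.
         */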
1598         xmit_more = netdev_xmit_more();
1599
1600         if (unlikely(cmdsetup.s.timestamp))
1601                 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
1602         else
1603                 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
1604         if (status == IQ_SEND_FAILED)
1605                 goto lio_xmit_failed;
1606
1607         netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
1608
1609         if (status == IQ_SEND_STOP) {
1610                 dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
1611                         iq_no);
1612                 netif_stop_subqueue(netdev, q_idx);
1613         }
1614
1615         netif_trans_update(netdev);
1616
1617         if (tx_info->s.gso_segs)
1618                 stats->tx_done += tx_info->s.gso_segs;
1619         else
1620                 stats->tx_done++;
1621         stats->tx_tot_bytes += ndata.datasize;
1622
1623         return NETDEV_TX_OK;
1624
1625 lio_xmit_failed:
1626         stats->tx_dropped++;
1627         netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
1628                    iq_no, stats->tx_dropped);
1629         if (dptr)
1630                 dma_unmap_single(&oct->pci_dev->dev, dptr,
1631                                  ndata.datasize, DMA_TO_DEVICE);
1632
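        /* Ring the doorbell so that commands already queued (possibly with
         * the doorbell deferred via xmit_more) still reach the hardware.
         */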
1633         octeon_ring_doorbell_locked(oct, iq_no);
1634
1635         tx_buffer_free(skb);
1636         return NETDEV_TX_OK;
1637 }
1638
1639 /**
1640  * liquidio_tx_timeout - Network device Tx timeout
1641  * @netdev: pointer to network device
1642  * @txqueue: index of the hung transmit queue
1643  */
1644 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1645 {
1646         struct lio *lio;
1647
1648         lio = GET_LIO(netdev);
1649
1650         netif_info(lio, tx_err, lio->netdev,
1651                    "Transmit timeout tx_dropped:%lu, waking up queues now!\n",
1652                    netdev->stats.tx_dropped);
1653         netif_trans_update(netdev);
1654         wake_txqs(netdev);
1655 }
1656
1657 static int
1658 liquidio_vlan_rx_add_vid(struct net_device *netdev,
1659                          __be16 proto __maybe_unused, u16 vid)
1660 {
1661         struct lio *lio = GET_LIO(netdev);
1662         struct octeon_device *oct = lio->oct_dev;
1663         struct octnic_ctrl_pkt nctrl;
1664         int ret = 0;
1665
1666         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1667
1668         nctrl.ncmd.u64 = 0;
1669         nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
1670         nctrl.ncmd.s.param1 = vid;
1671         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1672         nctrl.netpndev = (u64)netdev;
1673         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1674
1675         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1676         if (ret) {
1677                 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
1678                         ret);
1679                 return -EPERM;
1680         }
1681
1682         return 0;
1683 }
1684
1685 static int
1686 liquidio_vlan_rx_kill_vid(struct net_device *netdev,
1687                           __be16 proto __maybe_unused, u16 vid)
1688 {
1689         struct lio *lio = GET_LIO(netdev);
1690         struct octeon_device *oct = lio->oct_dev;
1691         struct octnic_ctrl_pkt nctrl;
1692         int ret = 0;
1693
1694         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1695
1696         nctrl.ncmd.u64 = 0;
1697         nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
1698         nctrl.ncmd.s.param1 = vid;
1699         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1700         nctrl.netpndev = (u64)netdev;
1701         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1702
1703         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1704         if (ret) {
1705                 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
1706                         ret);
1707                 if (ret > 0)
1708                         ret = -EIO;
1709         }
1710         return ret;
1711 }
1712
1713 /** Send a command to enable/disable RX checksum offload
1714  * @param netdev                pointer to network device
1715  * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
1716  * @param rx_cmd                OCTNET_CMD_RXCSUM_ENABLE/
1717  *                              OCTNET_CMD_RXCSUM_DISABLE
1718  * @returns                     SUCCESS or FAILURE
1719  */
1720 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
1721                                        u8 rx_cmd)
1722 {
1723         struct lio *lio = GET_LIO(netdev);
1724         struct octeon_device *oct = lio->oct_dev;
1725         struct octnic_ctrl_pkt nctrl;
1726         int ret = 0;
1727
1728         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1729
1730         nctrl.ncmd.u64 = 0;
1731         nctrl.ncmd.s.cmd = command;
1732         nctrl.ncmd.s.param1 = rx_cmd;
1733         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1734         nctrl.netpndev = (u64)netdev;
1735         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1736
1737         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1738         if (ret) {
1739                 dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n",
1740                         ret);
1741                 if (ret > 0)
1742                         ret = -EIO;
1743         }
1744         return ret;
1745 }
1746
1747 /** Send a command to add or delete a VxLAN UDP port in the firmware
1748  * @param netdev                pointer to network device
1749  * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
1750  * @param vxlan_port            VxLAN port to be added or deleted
1751  * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
1752  *                              OCTNET_CMD_VXLAN_PORT_DEL
1753  * @returns                     SUCCESS or FAILURE
1754  */
1755 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
1756                                        u16 vxlan_port, u8 vxlan_cmd_bit)
1757 {
1758         struct lio *lio = GET_LIO(netdev);
1759         struct octeon_device *oct = lio->oct_dev;
1760         struct octnic_ctrl_pkt nctrl;
1761         int ret = 0;
1762
1763         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1764
1765         nctrl.ncmd.u64 = 0;
1766         nctrl.ncmd.s.cmd = command;
1767         nctrl.ncmd.s.more = vxlan_cmd_bit;
1768         nctrl.ncmd.s.param1 = vxlan_port;
1769         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1770         nctrl.netpndev = (u64)netdev;
1771         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1772
1773         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1774         if (ret) {
1775                 dev_err(&oct->pci_dev->dev,
1776                         "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
1777                         ret);
1778                 if (ret > 0)
1779                         ret = -EIO;
1780         }
1781         return ret;
1782 }
1783
1784 static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
1785                                         unsigned int table, unsigned int entry,
1786                                         struct udp_tunnel_info *ti)
1787 {
1788         return liquidio_vxlan_port_command(netdev,
1789                                            OCTNET_CMD_VXLAN_PORT_CONFIG,
1790                                            ntohs(ti->port),
1791                                            OCTNET_CMD_VXLAN_PORT_ADD);
1792 }
1793
1794 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
1795                                           unsigned int table,
1796                                           unsigned int entry,
1797                                           struct udp_tunnel_info *ti)
1798 {
1799         return liquidio_vxlan_port_command(netdev,
1800                                            OCTNET_CMD_VXLAN_PORT_CONFIG,
1801                                            ntohs(ti->port),
1802                                            OCTNET_CMD_VXLAN_PORT_DEL);
1803 }
1804
1805 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
1806         .set_port       = liquidio_udp_tunnel_set_port,
1807         .unset_port     = liquidio_udp_tunnel_unset_port,
1808         .tables         = {
1809                 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
1810         },
1811 };
1812
1813 /** \brief Net device fix features
1814  * @param netdev  pointer to network device
1815  * @param request features requested
1816  * @returns updated features list
1817  */
1818 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
1819                                                netdev_features_t request)
1820 {
1821         struct lio *lio = netdev_priv(netdev);
1822
1823         if ((request & NETIF_F_RXCSUM) &&
1824             !(lio->dev_capability & NETIF_F_RXCSUM))
1825                 request &= ~NETIF_F_RXCSUM;
1826
1827         if ((request & NETIF_F_HW_CSUM) &&
1828             !(lio->dev_capability & NETIF_F_HW_CSUM))
1829                 request &= ~NETIF_F_HW_CSUM;
1830
1831         if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
1832                 request &= ~NETIF_F_TSO;
1833
1834         if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
1835                 request &= ~NETIF_F_TSO6;
1836
1837         if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
1838                 request &= ~NETIF_F_LRO;
1839
1840         /* Disable LRO if RXCSUM is off */
1841         if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
1842             (lio->dev_capability & NETIF_F_LRO))
1843                 request &= ~NETIF_F_LRO;
1844
1845         return request;
1846 }
1847
1848 /** \brief Net device set features
1849  * @param netdev  pointer to network device
1850  * @param features features to enable/disable
1851  */
1852 static int liquidio_set_features(struct net_device *netdev,
1853                                  netdev_features_t features)
1854 {
1855         struct lio *lio = netdev_priv(netdev);
1856
1857         if (!((netdev->features ^ features) & NETIF_F_LRO))
1858                 return 0;
1859
1860         if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
1861                 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
1862                                      OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
1863         else if (!(features & NETIF_F_LRO) &&
1864                  (lio->dev_capability & NETIF_F_LRO))
1865                 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
1866                                      OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
1867         if (!(netdev->features & NETIF_F_RXCSUM) &&
1868             (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
1869             (features & NETIF_F_RXCSUM))
1870                 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
1871                                             OCTNET_CMD_RXCSUM_ENABLE);
1872         else if ((netdev->features & NETIF_F_RXCSUM) &&
1873                  (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
1874                  !(features & NETIF_F_RXCSUM))
1875                 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
1876                                             OCTNET_CMD_RXCSUM_DISABLE);
1877
1878         return 0;
1879 }
1880
1881 static const struct net_device_ops lionetdevops = {
1882         .ndo_open               = liquidio_open,
1883         .ndo_stop               = liquidio_stop,
1884         .ndo_start_xmit         = liquidio_xmit,
1885         .ndo_get_stats64        = liquidio_get_stats64,
1886         .ndo_set_mac_address    = liquidio_set_mac,
1887         .ndo_set_rx_mode        = liquidio_set_mcast_list,
1888         .ndo_tx_timeout         = liquidio_tx_timeout,
1889         .ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
1890         .ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
1891         .ndo_change_mtu         = liquidio_change_mtu,
1892         .ndo_eth_ioctl          = liquidio_ioctl,
1893         .ndo_fix_features       = liquidio_fix_features,
1894         .ndo_set_features       = liquidio_set_features,
1895 };
1896
1897 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
1898 {
1899         struct octeon_device *oct = (struct octeon_device *)buf;
1900         struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
1901         union oct_link_status *ls;
1902         int gmxport = 0;
1903         int i;
1904
1905         if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
1906                 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
1907                         recv_pkt->buffer_size[0],
1908                         recv_pkt->rh.r_nic_info.gmxport);
1909                 goto nic_info_err;
1910         }
1911
1912         gmxport = recv_pkt->rh.r_nic_info.gmxport;
1913         ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
1914                 OCT_DROQ_INFO_SIZE);
1915
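        /* The link status arrives as big-endian 64-bit words; swap them
         * to host order (a no-op on big-endian hosts).
         */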
1916         octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
1917
1918         for (i = 0; i < oct->ifcount; i++) {
1919                 if (oct->props[i].gmxport == gmxport) {
1920                         update_link_status(oct->props[i].netdev, ls);
1921                         break;
1922                 }
1923         }
1924
1925 nic_info_err:
1926         for (i = 0; i < recv_pkt->buffer_count; i++)
1927                 recv_buffer_free(recv_pkt->buffer_ptr[i]);
1928         octeon_free_recv_info(recv_info);
1929         return 0;
1930 }
1931
1932 /**
1933  * setup_nic_devices - Setup network interfaces
1934  * @octeon_dev:  octeon device
1935  *
1936  * Called during init time for each device. It assumes the NIC
1937  * is already up and running.  The link information for each
1938  * interface is passed in link_info.
1939  */
1940 static int setup_nic_devices(struct octeon_device *octeon_dev)
1941 {
1942         int retval, num_iqueues, num_oqueues;
1943         u32 resp_size, data_size;
1944         struct liquidio_if_cfg_resp *resp;
1945         struct octeon_soft_command *sc;
1946         union oct_nic_if_cfg if_cfg;
1947         struct octdev_props *props;
1948         struct net_device *netdev;
1949         struct lio_version *vdata;
1950         struct lio *lio = NULL;
1951         u8 mac[ETH_ALEN], i, j;
1952         u32 ifidx_or_pfnum;
1953
1954         ifidx_or_pfnum = octeon_dev->pf_num;
1955
1956         /* This is to handle link status changes */
1957         octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
1958                                     lio_nic_info, octeon_dev);
1959
1960         /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
1961          * They are handled directly.
1962          */
1963         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
1964                                         free_netbuf);
1965
1966         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
1967                                         free_netsgbuf);
1968
1969         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
1970                                         free_netsgbuf_with_resp);
1971
1972         for (i = 0; i < octeon_dev->ifcount; i++) {
1973                 resp_size = sizeof(struct liquidio_if_cfg_resp);
1974                 data_size = sizeof(struct lio_version);
1975                 sc = (struct octeon_soft_command *)
1976                         octeon_alloc_soft_command(octeon_dev, data_size,
1977                                                   resp_size, 0);
1978                 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1979                 vdata = (struct lio_version *)sc->virtdptr;
1980
1981                 *((u64 *)vdata) = 0;
1982                 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
1983                 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
1984                 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
1985
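                /* Ask the firmware to configure this interface with as many
                 * input and output queues as there are rings assigned to
                 * this VF.
                 */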
1986                 if_cfg.u64 = 0;
1987
1988                 if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
1989                 if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
1990                 if_cfg.s.base_queue = 0;
1991
1992                 sc->iq_no = 0;
1993
1994                 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
1995                                             OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
1996                                             0);
1997
1998                 init_completion(&sc->complete);
1999                 sc->sc_status = OCTEON_REQUEST_PENDING;
2000
2001                 retval = octeon_send_soft_command(octeon_dev, sc);
2002                 if (retval == IQ_SEND_FAILED) {
2003                         dev_err(&octeon_dev->pci_dev->dev,
2004                                 "iq/oq config failed status: %x\n", retval);
2005                         /* Soft instr is freed by driver in case of failure. */
2006                         octeon_free_soft_command(octeon_dev, sc);
2007                         return -EIO;
2008                 }
2009
2010                 /* Sleep on a wait queue until the condition flag indicates
2011                  * that the response arrived or the request timed out.
2012                  */
2013                 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
2014                 if (retval)
2015                         return retval;
2016
2017                 retval = resp->status;
2018                 if (retval) {
2019                         dev_err(&octeon_dev->pci_dev->dev,
2020                                 "iq/oq config failed, retval = %d\n", retval);
2021                         WRITE_ONCE(sc->caller_is_done, true);
2022                         return -EIO;
2023                 }
2024
2025                 snprintf(octeon_dev->fw_info.liquidio_firmware_version,
2026                          32, "%s",
2027                          resp->cfg_info.liquidio_firmware_version);
2028
2029                 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
2030                                     (sizeof(struct liquidio_if_cfg_info)) >> 3);
2031
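                /* The firmware reports the granted queues as bitmasks;
                 * count the set bits to get the usable queue counts.
                 */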
2032                 num_iqueues = hweight64(resp->cfg_info.iqmask);
2033                 num_oqueues = hweight64(resp->cfg_info.oqmask);
2034
2035                 if (!num_iqueues || !num_oqueues) {
2036                         dev_err(&octeon_dev->pci_dev->dev,
2037                                 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
2038                                 resp->cfg_info.iqmask, resp->cfg_info.oqmask);
2039                         WRITE_ONCE(sc->caller_is_done, true);
2040                         goto setup_nic_dev_done;
2041                 }
2042                 dev_dbg(&octeon_dev->pci_dev->dev,
2043                         "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
2044                         i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
2045                         num_iqueues, num_oqueues);
2046
2047                 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
2048
2049                 if (!netdev) {
2050                         dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
2051                         WRITE_ONCE(sc->caller_is_done, true);
2052                         goto setup_nic_dev_done;
2053                 }
2054
2055                 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
2056
2057                 /* Associate the routines that will handle different
2058                  * netdev tasks.
2059                  */
2060                 netdev->netdev_ops = &lionetdevops;
2061
2062                 lio = GET_LIO(netdev);
2063
2064                 memset(lio, 0, sizeof(struct lio));
2065
2066                 lio->ifidx = ifidx_or_pfnum;
2067
2068                 props = &octeon_dev->props[i];
2069                 props->gmxport = resp->cfg_info.linfo.gmxport;
2070                 props->netdev = netdev;
2071
2072                 lio->linfo.num_rxpciq = num_oqueues;
2073                 lio->linfo.num_txpciq = num_iqueues;
2074
2075                 for (j = 0; j < num_oqueues; j++) {
2076                         lio->linfo.rxpciq[j].u64 =
2077                             resp->cfg_info.linfo.rxpciq[j].u64;
2078                 }
2079                 for (j = 0; j < num_iqueues; j++) {
2080                         lio->linfo.txpciq[j].u64 =
2081                             resp->cfg_info.linfo.txpciq[j].u64;
2082                 }
2083
2084                 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
2085                 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
2086                 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
2087                 lio->linfo.macaddr_is_admin_asgnd =
2088                         resp->cfg_info.linfo.macaddr_is_admin_asgnd;
2089                 lio->linfo.macaddr_spoofchk =
2090                         resp->cfg_info.linfo.macaddr_spoofchk;
2091
2092                 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2093
2094                 lio->dev_capability = NETIF_F_HIGHDMA
2095                                       | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2096                                       | NETIF_F_SG | NETIF_F_RXCSUM
2097                                       | NETIF_F_TSO | NETIF_F_TSO6
2098                                       | NETIF_F_GRO
2099                                       | NETIF_F_LRO;
2100                 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
2101
2102                 /* Copy of the transmit encapsulation capabilities
2103                  * (TSO, TSO6, checksums) for this device
2104                  */
2105                 lio->enc_dev_capability = NETIF_F_IP_CSUM
2106                                           | NETIF_F_IPV6_CSUM
2107                                           | NETIF_F_GSO_UDP_TUNNEL
2108                                           | NETIF_F_HW_CSUM | NETIF_F_SG
2109                                           | NETIF_F_RXCSUM
2110                                           | NETIF_F_TSO | NETIF_F_TSO6
2111                                           | NETIF_F_LRO;
2112
2113                 netdev->hw_enc_features =
2114                     (lio->enc_dev_capability & ~NETIF_F_LRO);
2115                 netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
2116
2117                 netdev->vlan_features = lio->dev_capability;
2118                 /* Add any unchangeable hw features */
2119                 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
2120                                        NETIF_F_HW_VLAN_CTAG_RX |
2121                                        NETIF_F_HW_VLAN_CTAG_TX;
2122
2123                 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
2124
2125                 netdev->hw_features = lio->dev_capability;
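                /* Rx VLAN tag stripping is always on in hardware, so it is
                 * not advertised as a user-changeable feature.
                 */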
2126                 netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2127
2128                 /* MTU range: 68 - 16000 */
2129                 netdev->min_mtu = LIO_MIN_MTU_SIZE;
2130                 netdev->max_mtu = LIO_MAX_MTU_SIZE;
2131
2132                 WRITE_ONCE(sc->caller_is_done, true);
2133
2134                 /* Point to the properties of the octeon device to which
2135                  * this interface belongs.
2136                  */
2137                 lio->oct_dev = octeon_dev;
2138                 lio->octprops = props;
2139                 lio->netdev = netdev;
2140
2141                 dev_dbg(&octeon_dev->pci_dev->dev,
2142                         "if%d gmx: %d hw_addr: 0x%llx\n", i,
2143                         lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
2144
2145                 /* 64-bit swap required on LE machines */
2146                 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
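                /* The 6-byte MAC sits in the low bytes of the 64-bit
                 * hw_addr field, so skip the two most-significant bytes.
                 */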
2147                 for (j = 0; j < ETH_ALEN; j++)
2148                         mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
2149
2150                 /* Copy MAC Address to OS network device structure */
2151                 ether_addr_copy(netdev->dev_addr, mac);
2152
2153                 if (liquidio_setup_io_queues(octeon_dev, i,
2154                                              lio->linfo.num_txpciq,
2155                                              lio->linfo.num_rxpciq)) {
2156                         dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
2157                         goto setup_nic_dev_free;
2158                 }
2159
2160                 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
2161
2162                 /* For VFs, enable Octeon device interrupts here,
2163                  * as this is contingent upon IO queue setup
2164                  */
2165                 octeon_dev->fn_list.enable_interrupt(octeon_dev,
2166                                                      OCTEON_ALL_INTR);
2167
2168                 /* By default, all interfaces on a single Octeon use the
2169                  * same tx and rx queues.
2170                  */
2171                 lio->txq = lio->linfo.txpciq[0].s.q_no;
2172                 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
2173
2174                 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
2175                 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
2176
2177                 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
2178                         dev_err(&octeon_dev->pci_dev->dev,
2179                                 "Gather list allocation failed\n");
2180                         goto setup_nic_dev_free;
2181                 }
2182
2183                 /* Register ethtool support */
2184                 liquidio_set_ethtool_ops(netdev);
2185                 if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
2186                         octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
2187                 else
2188                         octeon_dev->priv_flags = 0x0;
2189
2190                 if (netdev->features & NETIF_F_LRO)
2191                         liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2192                                              OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2193
2194                 if (setup_link_status_change_wq(netdev))
2195                         goto setup_nic_dev_free;
2196
2197                 if (setup_rx_oom_poll_fn(netdev))
2198                         goto setup_nic_dev_free;
2199
2200                 /* Register the network device with the OS */
2201                 if (register_netdev(netdev)) {
2202                         dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
2203                         goto setup_nic_dev_free;
2204                 }
2205
2206                 dev_dbg(&octeon_dev->pci_dev->dev,
2207                         "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
2208                         i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2209                 netif_carrier_off(netdev);
2210                 lio->link_changes++;
2211
2212                 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
2213
2214                 /* Send a command to the firmware to enable Rx checksum
2215                  * offload by default when the LiquidIO driver is set up
2216                  * for this device.
2217                  */
2218                 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2219                                             OCTNET_CMD_RXCSUM_ENABLE);
2220                 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
2221                                      OCTNET_CMD_TXCSUM_ENABLE);
2222
2223                 dev_dbg(&octeon_dev->pci_dev->dev,
2224                         "NIC ifidx:%d Setup successful\n", i);
2225
2226                 octeon_dev->no_speed_setting = 1;
2227         }
2228
2229         return 0;
2230
2231 setup_nic_dev_free:
2232
2233         while (i--) {
2234                 dev_err(&octeon_dev->pci_dev->dev,
2235                         "NIC ifidx:%d Setup failed\n", i);
2236                 liquidio_destroy_nic_device(octeon_dev, i);
2237         }
2238
2239 setup_nic_dev_done:
2240
2241         return -ENODEV;
2242 }
2243
2244 /**
2245  * liquidio_init_nic_module - initialize the NIC
2246  * @oct: octeon device
2247  *
2248  * This initialization routine is called once the Octeon device application is
2249  * up and running
2250  */
2251 static int liquidio_init_nic_module(struct octeon_device *oct)
2252 {
2253         int num_nic_ports = 1;
2254         int i, retval = 0;
2255
2256         dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
2257
2258         /* Only the default iq and oq were initialized; initialize the
2259          * rest as well and run the port_config command for each port.
2260          */
2261         oct->ifcount = num_nic_ports;
2262         memset(oct->props, 0,
2263                sizeof(struct octdev_props) * num_nic_ports);
2264
2265         for (i = 0; i < MAX_OCTEON_LINKS; i++)
2266                 oct->props[i].gmxport = -1;
2267
2268         retval = setup_nic_devices(oct);
2269         if (retval) {
2270                 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
2271                 goto octnet_init_failure;
2272         }
2273
2274         dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
2275
2276         return retval;
2277
2278 octnet_init_failure:
2279
2280         oct->ifcount = 0;
2281
2282         return retval;
2283 }
2284
2285 /**
2286  * octeon_device_init - Device initialization for each Octeon device that is probed
2287  * @oct:  octeon device
2288  */
2289 static int octeon_device_init(struct octeon_device *oct)
2290 {
2291         u32 rev_id;
2292         int j;
2293
2294         atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);
2295
2296         /* Enable access to the octeon device and make its DMA capability
2297          * known to the OS.
2298          */
2299         if (octeon_pci_os_setup(oct))
2300                 return 1;
2301         atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);
2302
2303         oct->chip_id = OCTEON_CN23XX_VF_VID;
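        /* PCI config offset 8 holds the class code and revision ID;
         * the revision ID is in the least-significant byte.
         */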
2304         pci_read_config_dword(oct->pci_dev, 8, &rev_id);
2305         oct->rev_id = rev_id & 0xff;
2306
2307         if (cn23xx_setup_octeon_vf_device(oct))
2308                 return 1;
2309
2310         atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);
2311
2312         oct->app_mode = CVM_DRV_NIC_APP;
2313
2314         /* Initialize the dispatch mechanism used to push packets arriving on
2315          * Octeon Output queues.
2316          */
2317         if (octeon_init_dispatch_list(oct))
2318                 return 1;
2319
2320         atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);
2321
2322         if (octeon_set_io_queues_off(oct)) {
2323                 dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
2324                 return 1;
2325         }
2326
2327         if (oct->fn_list.setup_device_regs(oct)) {
2328                 dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
2329                 return 1;
2330         }
2331
2332         /* Initialize soft command buffer pool */
2333         if (octeon_setup_sc_buffer_pool(oct)) {
2334                 dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
2335                 return 1;
2336         }
2337         atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
2338
2339         /* Setup the data structures that manage this Octeon's Input queues. */
2340         if (octeon_setup_instr_queues(oct)) {
2341                 dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
2342                 return 1;
2343         }
2344         atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
2345
2346         /* Initialize lists to manage the requests of different types that
2347          * arrive from user & kernel applications for this octeon device.
2348          */
2349         if (octeon_setup_response_list(oct)) {
2350                 dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
2351                 return 1;
2352         }
2353         atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);
2354
2355         if (octeon_setup_output_queues(oct)) {
2356                 dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
2357                 return 1;
2358         }
2359         atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);
2360
2361         if (oct->fn_list.setup_mbox(oct)) {
2362                 dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
2363                 return 1;
2364         }
2365         atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
2366
2367         if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
2368                 dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
2369                 return 1;
2370         }
2371         atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
2372
2373         dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF: %d ioqs\n",
2374                  oct->sriov_info.rings_per_vf);
2375
2376         /* Set up the interrupt handler and record the INT SUM register address */
2377         if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
2378                 return 1;
2379
2380         atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
2381
2382         /* ***************************************************************
2383          * The interrupts need to be enabled for the PF<-->VF handshake.
2384          * They are [re]-enabled after the PF<-->VF handshake so that the
2385          * correct OQ tick value is used (i.e. the value retrieved from
2386          * the PF as part of the handshake).
2387          */
2388
2389         /* Enable Octeon device interrupts */
2390         oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2391
2392         if (cn23xx_octeon_pfvf_handshake(oct))
2393                 return 1;
2394
2395         /* Here we [re]-enable the interrupts so that the correct OQ tick value
2396          * is used (i.e. the value that was retrieved during the handshake)
2397          */
2398
2399         /* Enable Octeon device interrupts */
2400         oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2401         /* *************************************************************** */
2402
2403         /* Enable the input and output queues for this Octeon device */
2404         if (oct->fn_list.enable_io_queues(oct)) {
2405                 dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
2406                 return 1;
2407         }
2408
2409         atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);
2410
2411         atomic_set(&oct->status, OCT_DEV_HOST_OK);
2412
2413         /* Send Credit for Octeon Output queues. Credits are always sent after
2414          * the output queue is enabled.
2415          */
2416         for (j = 0; j < oct->num_oqs; j++)
2417                 writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);
2418
2419         /* Packets can start arriving on the output queues from this point. */
2420
2421         atomic_set(&oct->status, OCT_DEV_CORE_OK);
2422
2423         atomic_set(&oct->status, OCT_DEV_RUNNING);
2424
2425         if (liquidio_init_nic_module(oct))
2426                 return 1;
2427
2428         return 0;
2429 }
2430
2431 static int __init liquidio_vf_init(void)
2432 {
2433         octeon_init_device_list(0);
2434         return pci_register_driver(&liquidio_vf_pci_driver);
2435 }
2436
2437 static void __exit liquidio_vf_exit(void)
2438 {
2439         pci_unregister_driver(&liquidio_vf_pci_driver);
2440
2441         pr_info("LiquidIO_VF network module is now unloaded\n");
2442 }
2443
2444 module_init(liquidio_vf_init);
2445 module_exit(liquidio_vf_exit);