1 /**********************************************************************
4 * Contact: support@cavium.com
5 * Please include "LiquidIO" in the subject.
7 * Copyright (c) 2003-2016 Cavium, Inc.
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more details.
17 ***********************************************************************/
18 #include <linux/module.h>
19 #include <linux/interrupt.h>
20 #include <linux/pci.h>
21 #include <net/vxlan.h>
22 #include "liquidio_common.h"
23 #include "octeon_droq.h"
24 #include "octeon_iq.h"
25 #include "response_manager.h"
26 #include "octeon_device.h"
27 #include "octeon_nic.h"
28 #include "octeon_main.h"
29 #include "octeon_network.h"
30 #include "cn23xx_vf_device.h"
32 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
33 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
34 MODULE_LICENSE("GPL");
36 static int debug = -1;
37 module_param(debug, int, 0644);
38 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
40 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
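/* Illustrative usage (module name assumed to be liquidio_vf here):
 *   modprobe liquidio_vf debug=0x37
 * enables the DRV, PROBE, LINK, IFDOWN and IFUP NETIF_MSG_* bits; the default
 * of -1 falls back to DEFAULT_MSG_ENABLE via netif_msg_init().
 */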
42 struct oct_timestamp_resp {
51 #ifdef __BIG_ENDIAN_BITFIELD
63 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
64 #define OCTNIC_GSO_MAX_SIZE \
65 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
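/* A GSO super-frame handed to the firmware may therefore be as large as the
 * input jabber limit minus the worst-case header space reserved above.
 */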
68 liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
69 static void liquidio_vf_remove(struct pci_dev *pdev);
70 static int octeon_device_init(struct octeon_device *oct);
71 static int liquidio_stop(struct net_device *netdev);
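/**
 * \brief Wait for the DROQs to drain any Rx packets still held by hardware
 * @param oct Pointer to Octeon device
 *
 * Reschedules the droq tasklet while packets remain; a non-zero return means
 * packets were still pending when the retry budget ran out.
 */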
73 static int lio_wait_for_oq_pkts(struct octeon_device *oct)
75 struct octeon_device_priv *oct_priv =
76 (struct octeon_device_priv *)oct->priv;
77 int retry = MAX_IO_PENDING_PKT_COUNT;
78 int pkt_cnt = 0, pending_pkts;
84 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
85 if (!(oct->io_qmask.oq & BIT_ULL(i)))
87 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
90 pending_pkts += pkt_cnt;
91 tasklet_schedule(&oct_priv->droq_tasklet);
94 schedule_timeout_uninterruptible(1);
96 } while (retry-- && pending_pkts);
102 * \brief Cause device to go quiet so it can be safely removed/reset/etc
103 * @param oct Pointer to Octeon device
105 static void pcierror_quiesce_device(struct octeon_device *oct)
109 /* Disable the input and output queues now. No more packets will
110  * arrive from Octeon, but we should wait for all packet processing to finish.
114 /* To allow for in-flight requests */
115 schedule_timeout_uninterruptible(100);
117 if (wait_for_pending_requests(oct))
118 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
120 /* Force all requests waiting to be fetched by OCTEON to complete. */
121 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
122 struct octeon_instr_queue *iq;
124 if (!(oct->io_qmask.iq & BIT_ULL(i)))
126 iq = oct->instr_queue[i];
128 if (atomic_read(&iq->instr_pending)) {
129 spin_lock_bh(&iq->lock);
131 iq->octeon_read_index = iq->host_write_index;
132 iq->stats.instr_processed +=
133 atomic_read(&iq->instr_pending);
134 lio_process_iq_request_list(oct, iq, 0);
135 spin_unlock_bh(&iq->lock);
139 /* Force all pending ordered list requests to time out. */
140 lio_process_ordered_list(oct, 1);
142 /* We do not need to wait for output queue packets to be processed. */
146 * \brief Cleanup PCI AER uncorrectable error status
147 * @param dev Pointer to PCI device
149 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
154 pr_info("%s:\n", __func__);
156 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
157 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
158 if (dev->error_state == pci_channel_io_normal)
159 status &= ~mask; /* Clear corresponding nonfatal bits */
161 status &= mask; /* Clear corresponding fatal bits */
162 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
166 * \brief Stop all PCI IO to a given device
167 * @param oct Pointer to Octeon device
169 static void stop_pci_io(struct octeon_device *oct)
171 struct msix_entry *msix_entries;
174 /* No more instructions will be forwarded. */
175 atomic_set(&oct->status, OCT_DEV_IN_RESET);
177 for (i = 0; i < oct->ifcount; i++)
178 netif_device_detach(oct->props[i].netdev);
180 /* Disable interrupts */
181 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
183 pcierror_quiesce_device(oct);
185 msix_entries = (struct msix_entry *)oct->msix_entries;
186 for (i = 0; i < oct->num_msix_irqs; i++) {
187 /* clear the affinity_cpumask */
188 irq_set_affinity_hint(msix_entries[i].vector,
190 free_irq(msix_entries[i].vector,
191 &oct->ioq_vector[i]);
193 pci_disable_msix(oct->pci_dev);
194 kfree(oct->msix_entries);
195 oct->msix_entries = NULL;
196 octeon_free_ioq_vector(oct);
198 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
199 lio_get_state_string(&oct->status));
201 /* making it a common function for all OCTEON models */
202 cleanup_aer_uncorrect_error_status(oct->pci_dev);
204 pci_disable_device(oct->pci_dev);
208 * \brief called when PCI error is detected
209 * @param pdev Pointer to PCI device
210 * @param state The current pci connection state
212 * This function is called after a PCI bus error affecting
213 * this device has been detected.
215 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
216 pci_channel_state_t state)
218 struct octeon_device *oct = pci_get_drvdata(pdev);
220 /* Non-correctable Non-fatal errors */
221 if (state == pci_channel_io_normal) {
222 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
223 cleanup_aer_uncorrect_error_status(oct->pci_dev);
224 return PCI_ERS_RESULT_CAN_RECOVER;
227 /* Non-correctable Fatal errors */
228 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
231 return PCI_ERS_RESULT_DISCONNECT;
234 /* For PCI-E Advanced Error Recovery (AER) Interface */
235 static const struct pci_error_handlers liquidio_vf_err_handler = {
236 .error_detected = liquidio_pcie_error_detected,
239 static const struct pci_device_id liquidio_vf_pci_tbl[] = {
241 PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
242 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
248 MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl);
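/* Only the CN23XX virtual-function device ID is listed here; the physical
 * function is bound by the separate liquidio PF driver.
 */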
250 static struct pci_driver liquidio_vf_pci_driver = {
251 .name = "LiquidIO_VF",
252 .id_table = liquidio_vf_pci_tbl,
253 .probe = liquidio_vf_probe,
254 .remove = liquidio_vf_remove,
255 .err_handler = &liquidio_vf_err_handler, /* For AER */
259 * \brief Print link information
260 * @param netdev network device
262 static void print_link_info(struct net_device *netdev)
264 struct lio *lio = GET_LIO(netdev);
266 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
267 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
268 struct oct_link_info *linfo = &lio->linfo;
270 if (linfo->link.s.link_up) {
271 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
273 (linfo->link.s.duplex) ? "Full" : "Half");
275 netif_info(lio, link, lio->netdev, "Link Down\n");
281 * \brief Routine to notify MTU change
282 * @param work work_struct data structure
284 static void octnet_link_status_change(struct work_struct *work)
286 struct cavium_wk *wk = (struct cavium_wk *)work;
287 struct lio *lio = (struct lio *)wk->ctxptr;
289 /* lio->linfo.link.s.mtu always contains the max MTU of the lio interface.
290  * This work function is invoked only when the new max MTU of the interface
291  * is less than the current MTU.
294 dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
299 * \brief Sets up the mtu status change work
300 * @param netdev network device
302 static int setup_link_status_change_wq(struct net_device *netdev)
304 struct lio *lio = GET_LIO(netdev);
305 struct octeon_device *oct = lio->oct_dev;
307 lio->link_status_wq.wq = alloc_workqueue("link-status",
309 if (!lio->link_status_wq.wq) {
310 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
313 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
314 octnet_link_status_change);
315 lio->link_status_wq.wk.ctxptr = lio;
320 static void cleanup_link_status_change_wq(struct net_device *netdev)
322 struct lio *lio = GET_LIO(netdev);
324 if (lio->link_status_wq.wq) {
325 cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
326 destroy_workqueue(lio->link_status_wq.wq);
331 * \brief Update link status
332 * @param netdev network device
333 * @param ls link status structure
335 * Called on receipt of a link status response from the core application to
336 * update each interface's link status.
338 static void update_link_status(struct net_device *netdev,
339 union oct_link_status *ls)
341 struct lio *lio = GET_LIO(netdev);
342 int current_max_mtu = lio->linfo.link.s.mtu;
343 struct octeon_device *oct = lio->oct_dev;
345 if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
346 lio->linfo.link.u64 = ls->u64;
348 print_link_info(netdev);
351 if (lio->linfo.link.s.link_up) {
352 netif_carrier_on(netdev);
355 netif_carrier_off(netdev);
359 if (lio->linfo.link.s.mtu != current_max_mtu) {
360 dev_info(&oct->pci_dev->dev,
361 "Max MTU Changed from %d to %d\n",
362 current_max_mtu, lio->linfo.link.s.mtu);
363 netdev->max_mtu = lio->linfo.link.s.mtu;
366 if (lio->linfo.link.s.mtu < netdev->mtu) {
367 dev_warn(&oct->pci_dev->dev,
368 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
369 netdev->mtu, lio->linfo.link.s.mtu);
370 queue_delayed_work(lio->link_status_wq.wq,
371 &lio->link_status_wq.wk.work, 0);
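/* The actual MTU reduction is deferred to the link-status workqueue
 * (octnet_link_status_change above) so that dev_set_mtu(), which must run in
 * process context under RTNL, is kept out of this dispatch path.
 */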
377 * \brief PCI probe handler
378 * @param pdev PCI device structure
382 liquidio_vf_probe(struct pci_dev *pdev,
383 const struct pci_device_id *ent __attribute__((unused)))
385 struct octeon_device *oct_dev = NULL;
387 oct_dev = octeon_allocate_device(pdev->device,
388 sizeof(struct octeon_device_priv));
391 dev_err(&pdev->dev, "Unable to allocate device\n");
394 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
396 dev_info(&pdev->dev, "Initializing device %x:%x.\n",
397 (u32)pdev->vendor, (u32)pdev->device);
399 /* Assign octeon_device for this device to the private data area. */
400 pci_set_drvdata(pdev, oct_dev);
402 /* set linux specific device pointer */
403 oct_dev->pci_dev = pdev;
405 oct_dev->subsystem_id = pdev->subsystem_vendor |
406 (pdev->subsystem_device << 16);
408 if (octeon_device_init(oct_dev)) {
409 liquidio_vf_remove(pdev);
413 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
419 * \brief PCI FLR for each Octeon device.
420 * @param oct octeon device
422 static void octeon_pci_flr(struct octeon_device *oct)
424 pci_save_state(oct->pci_dev);
426 pci_cfg_access_lock(oct->pci_dev);
428 /* Quiesce the device completely */
429 pci_write_config_word(oct->pci_dev, PCI_COMMAND,
430 PCI_COMMAND_INTX_DISABLE);
432 pcie_flr(oct->pci_dev);
434 pci_cfg_access_unlock(oct->pci_dev);
436 pci_restore_state(oct->pci_dev);
440 * \brief Destroy resources associated with octeon device
441 * @param oct octeon device
444 static void octeon_destroy_resources(struct octeon_device *oct)
446 struct octeon_device_priv *oct_priv =
447 (struct octeon_device_priv *)oct->priv;
448 struct msix_entry *msix_entries;
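/* Tear-down is driven by how far octeon_device_init() progressed: the switch
 * cases below intentionally fall through, unwinding the device in the reverse
 * order of initialization.
 */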
451 switch (atomic_read(&oct->status)) {
452 case OCT_DEV_RUNNING:
453 case OCT_DEV_CORE_OK:
454 /* No more instructions will be forwarded. */
455 atomic_set(&oct->status, OCT_DEV_IN_RESET);
457 oct->app_mode = CVM_DRV_INVALID_APP;
458 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
459 lio_get_state_string(&oct->status));
461 schedule_timeout_uninterruptible(HZ / 10);
464 case OCT_DEV_HOST_OK:
465 case OCT_DEV_IO_QUEUES_DONE:
466 if (lio_wait_for_instr_fetch(oct))
467 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
469 if (wait_for_pending_requests(oct))
470 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
472 /* Disable the input and output queues now. No more packets will
473 * arrive from Octeon, but we should wait for all packet
474 * processing to finish.
476 oct->fn_list.disable_io_queues(oct);
478 if (lio_wait_for_oq_pkts(oct))
479 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
481 /* Force all requests waiting to be fetched by OCTEON to
484 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
485 struct octeon_instr_queue *iq;
487 if (!(oct->io_qmask.iq & BIT_ULL(i)))
489 iq = oct->instr_queue[i];
491 if (atomic_read(&iq->instr_pending)) {
492 spin_lock_bh(&iq->lock);
494 iq->octeon_read_index = iq->host_write_index;
495 iq->stats.instr_processed +=
496 atomic_read(&iq->instr_pending);
497 lio_process_iq_request_list(oct, iq, 0);
498 spin_unlock_bh(&iq->lock);
502 lio_process_ordered_list(oct, 1);
503 octeon_free_sc_done_list(oct);
504 octeon_free_sc_zombie_list(oct);
507 case OCT_DEV_INTR_SET_DONE:
508 /* Disable interrupts */
509 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
512 msix_entries = (struct msix_entry *)oct->msix_entries;
513 for (i = 0; i < oct->num_msix_irqs; i++) {
514 if (oct->ioq_vector[i].vector) {
515 irq_set_affinity_hint(
516 msix_entries[i].vector,
518 free_irq(msix_entries[i].vector,
519 &oct->ioq_vector[i]);
520 oct->ioq_vector[i].vector = 0;
523 pci_disable_msix(oct->pci_dev);
524 kfree(oct->msix_entries);
525 oct->msix_entries = NULL;
526 kfree(oct->irq_name_storage);
527 oct->irq_name_storage = NULL;
529 /* Soft reset the octeon device before exiting */
530 if (oct->pci_dev->reset_fn)
533 cn23xx_vf_ask_pf_to_do_flr(oct);
536 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
537 octeon_free_ioq_vector(oct);
540 case OCT_DEV_MBOX_SETUP_DONE:
541 oct->fn_list.free_mbox(oct);
544 case OCT_DEV_IN_RESET:
545 case OCT_DEV_DROQ_INIT_DONE:
547 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
548 if (!(oct->io_qmask.oq & BIT_ULL(i)))
550 octeon_delete_droq(oct, i);
554 case OCT_DEV_RESP_LIST_INIT_DONE:
555 octeon_delete_response_list(oct);
558 case OCT_DEV_INSTR_QUEUE_INIT_DONE:
559 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
560 if (!(oct->io_qmask.iq & BIT_ULL(i)))
562 octeon_delete_instr_queue(oct, i);
566 case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
567 octeon_free_sc_buffer_pool(oct);
570 case OCT_DEV_DISPATCH_INIT_DONE:
571 octeon_delete_dispatch_list(oct);
572 cancel_delayed_work_sync(&oct->nic_poll_work.work);
575 case OCT_DEV_PCI_MAP_DONE:
576 octeon_unmap_pci_barx(oct, 0);
577 octeon_unmap_pci_barx(oct, 1);
580 case OCT_DEV_PCI_ENABLE_DONE:
581 pci_clear_master(oct->pci_dev);
582 /* Disable the device, releasing the PCI INT */
583 pci_disable_device(oct->pci_dev);
586 case OCT_DEV_BEGIN_STATE:
587 /* Nothing to be done here either */
591 tasklet_kill(&oct_priv->droq_tasklet);
595 * \brief Send Rx control command
596 * @param lio per-network private data
597 * @param start_stop whether to start or stop
599 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
601 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
602 struct octeon_soft_command *sc;
603 union octnet_cmd *ncmd;
606 if (oct->props[lio->ifidx].rx_on == start_stop)
609 sc = (struct octeon_soft_command *)
610 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
613 ncmd = (union octnet_cmd *)sc->virtdptr;
616 ncmd->s.cmd = OCTNET_CMD_RX_CTL;
617 ncmd->s.param1 = start_stop;
619 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
621 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
623 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
624 OPCODE_NIC_CMD, 0, 0, 0);
626 init_completion(&sc->complete);
627 sc->sc_status = OCTEON_REQUEST_PENDING;
629 retval = octeon_send_soft_command(oct, sc);
630 if (retval == IQ_SEND_FAILED) {
631 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
632 octeon_free_soft_command(oct, sc);
634 /* Wait until the completion indicates that the response arrived or the
635  * request timed out.
637 retval = wait_for_sc_completion_timeout(oct, sc, 0);
641 oct->props[lio->ifidx].rx_on = start_stop;
642 WRITE_ONCE(sc->caller_is_done, true);
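/* Rx control uses the synchronous soft-command path: the response (or a
 * timeout) is waited for above, and setting caller_is_done lets the driver
 * free the soft command once the response manager has finished with it.
 */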
647 * \brief Destroy NIC device interface
648 * @param oct octeon device
649 * @param ifidx which interface to destroy
651 * Cleanup associated with each interface for an Octeon device when NIC
652 * module is being unloaded or if initialization fails during load.
654 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
656 struct net_device *netdev = oct->props[ifidx].netdev;
657 struct octeon_device_priv *oct_priv =
658 (struct octeon_device_priv *)oct->priv;
659 struct napi_struct *napi, *n;
663 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
668 lio = GET_LIO(netdev);
670 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
672 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
673 liquidio_stop(netdev);
675 if (oct->props[lio->ifidx].napi_enabled == 1) {
676 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
679 oct->props[lio->ifidx].napi_enabled = 0;
681 oct->droq[0]->ops.poll_mode = 0;
685 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
686 netif_napi_del(napi);
688 tasklet_enable(&oct_priv->droq_tasklet);
690 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
691 unregister_netdev(netdev);
693 cleanup_rx_oom_poll_fn(netdev);
695 cleanup_link_status_change_wq(netdev);
697 lio_delete_glists(lio);
701 oct->props[ifidx].gmxport = -1;
703 oct->props[ifidx].netdev = NULL;
707 * \brief Stop complete NIC functionality
708 * @param oct octeon device
710 static int liquidio_stop_nic_module(struct octeon_device *oct)
715 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
717 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
721 spin_lock_bh(&oct->cmd_resp_wqlock);
722 oct->cmd_resp_state = OCT_DRV_OFFLINE;
723 spin_unlock_bh(&oct->cmd_resp_wqlock);
725 for (i = 0; i < oct->ifcount; i++) {
726 lio = GET_LIO(oct->props[i].netdev);
727 for (j = 0; j < oct->num_oqs; j++)
728 octeon_unregister_droq_ops(oct,
729 lio->linfo.rxpciq[j].s.q_no);
732 for (i = 0; i < oct->ifcount; i++)
733 liquidio_destroy_nic_device(oct, i);
735 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
740 * \brief Cleans up resources at unload time
741 * @param pdev PCI device structure
743 static void liquidio_vf_remove(struct pci_dev *pdev)
745 struct octeon_device *oct_dev = pci_get_drvdata(pdev);
747 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
749 if (oct_dev->app_mode == CVM_DRV_NIC_APP)
750 liquidio_stop_nic_module(oct_dev);
752 /* Reset the octeon device and cleanup all memory allocated for
753 * the octeon device by driver.
755 octeon_destroy_resources(oct_dev);
757 dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
759 /* This octeon device has been removed. Update the global
760 * data structure to reflect this. Free the device structure.
762 octeon_free_device_mem(oct_dev);
766 * \brief PCI initialization for each Octeon device.
767 * @param oct octeon device
769 static int octeon_pci_os_setup(struct octeon_device *oct)
771 #ifdef CONFIG_PCI_IOV
772 /* setup PCI stuff first */
773 if (!oct->pci_dev->physfn)
777 if (pci_enable_device(oct->pci_dev)) {
778 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
782 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
783 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
784 pci_disable_device(oct->pci_dev);
788 /* Enable PCI DMA Master. */
789 pci_set_master(oct->pci_dev);
795 * \brief Unmap and free network buffer
798 static void free_netbuf(void *buf)
800 struct octnet_buf_free_info *finfo;
804 finfo = (struct octnet_buf_free_info *)buf;
808 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
815 * \brief Unmap and free gather buffer
818 static void free_netsgbuf(void *buf)
820 struct octnet_buf_free_info *finfo;
821 struct octnic_gather *g;
826 finfo = (struct octnet_buf_free_info *)buf;
830 frags = skb_shinfo(skb)->nr_frags;
832 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
833 g->sg[0].ptr[0], (skb->len - skb->data_len),
838 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
840 pci_unmap_page((lio->oct_dev)->pci_dev,
841 g->sg[(i >> 2)].ptr[(i & 3)],
842 skb_frag_size(frag), DMA_TO_DEVICE);
846 iq = skb_iq(lio->oct_dev, skb);
848 spin_lock(&lio->glist_lock[iq]);
849 list_add_tail(&g->list, &lio->glist[iq]);
850 spin_unlock(&lio->glist_lock[iq]);
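/* The gather descriptor is returned to the per-IQ free list (lio->glist[iq])
 * so the next scatter-gather transmit on this queue can reuse it.
 */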
856 * \brief Unmap and free gather buffer with response
859 static void free_netsgbuf_with_resp(void *buf)
861 struct octnet_buf_free_info *finfo;
862 struct octeon_soft_command *sc;
863 struct octnic_gather *g;
868 sc = (struct octeon_soft_command *)buf;
869 skb = (struct sk_buff *)sc->callback_arg;
870 finfo = (struct octnet_buf_free_info *)&skb->cb;
874 frags = skb_shinfo(skb)->nr_frags;
876 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
877 g->sg[0].ptr[0], (skb->len - skb->data_len),
882 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
884 pci_unmap_page((lio->oct_dev)->pci_dev,
885 g->sg[(i >> 2)].ptr[(i & 3)],
886 skb_frag_size(frag), DMA_TO_DEVICE);
890 iq = skb_iq(lio->oct_dev, skb);
892 spin_lock(&lio->glist_lock[iq]);
893 list_add_tail(&g->list, &lio->glist[iq]);
894 spin_unlock(&lio->glist_lock[iq]);
896 /* Don't free the skb yet */
900 * \brief Net device open for LiquidIO
901 * @param netdev network device
903 static int liquidio_open(struct net_device *netdev)
905 struct lio *lio = GET_LIO(netdev);
906 struct octeon_device *oct = lio->oct_dev;
907 struct octeon_device_priv *oct_priv =
908 (struct octeon_device_priv *)oct->priv;
909 struct napi_struct *napi, *n;
911 if (!oct->props[lio->ifidx].napi_enabled) {
912 tasklet_disable(&oct_priv->droq_tasklet);
914 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
917 oct->props[lio->ifidx].napi_enabled = 1;
919 oct->droq[0]->ops.poll_mode = 1;
922 ifstate_set(lio, LIO_IFSTATE_RUNNING);
924 /* Ready for link status updates */
927 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
930 INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
931 lio->stats_wk.ctxptr = lio;
932 schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
933 (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
935 /* tell Octeon to start forwarding packets to host */
936 send_rx_ctrl_cmd(lio, 1);
938 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);
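/* Note the ordering on open: NAPI is enabled (and the droq tasklet disabled
 * in its favour) before send_rx_ctrl_cmd(lio, 1) asks the firmware to forward
 * packets, so the first Rx completion always finds an active poller.
 */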
944 * \brief Net device stop for LiquidIO
945 * @param netdev network device
947 static int liquidio_stop(struct net_device *netdev)
949 struct lio *lio = GET_LIO(netdev);
950 struct octeon_device *oct = lio->oct_dev;
951 struct octeon_device_priv *oct_priv =
952 (struct octeon_device_priv *)oct->priv;
953 struct napi_struct *napi, *n;
955 /* tell Octeon to stop forwarding packets to host */
956 send_rx_ctrl_cmd(lio, 0);
958 netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
959 /* Inform that netif carrier is down */
961 lio->linfo.link.s.link_up = 0;
963 netif_carrier_off(netdev);
966 ifstate_reset(lio, LIO_IFSTATE_RUNNING);
970 /* Wait for any pending Rx descriptors */
971 if (lio_wait_for_clean_oq(oct))
972 netif_info(lio, rx_err, lio->netdev,
973 "Proceeding with stop interface after partial RX desc processing\n");
975 if (oct->props[lio->ifidx].napi_enabled == 1) {
976 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
979 oct->props[lio->ifidx].napi_enabled = 0;
981 oct->droq[0]->ops.poll_mode = 0;
983 tasklet_enable(&oct_priv->droq_tasklet);
986 cancel_delayed_work_sync(&lio->stats_wk.work);
988 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
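/* Stop mirrors open in reverse: firmware Rx is halted first, pending Rx
 * descriptors are drained, and only then is NAPI disabled and the droq
 * tasklet re-enabled (balancing the disable done in liquidio_open).
 */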
994 * \brief Convert net device flags to an octnet_ifflags mask
995 * @param netdev network device
997 * This routine generates an octnet_ifflags mask from the net device flags
998 * received from the OS.
1000 static enum octnet_ifflags get_new_flags(struct net_device *netdev)
1002 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1004 if (netdev->flags & IFF_PROMISC)
1005 f |= OCTNET_IFFLAG_PROMISC;
1007 if (netdev->flags & IFF_ALLMULTI)
1008 f |= OCTNET_IFFLAG_ALLMULTI;
1010 if (netdev->flags & IFF_MULTICAST) {
1011 f |= OCTNET_IFFLAG_MULTICAST;
1013 /* Accept all multicast addresses if there are more than we can handle. */
1016 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1017 f |= OCTNET_IFFLAG_ALLMULTI;
1020 if (netdev->flags & IFF_BROADCAST)
1021 f |= OCTNET_IFFLAG_BROADCAST;
1026 static void liquidio_set_uc_list(struct net_device *netdev)
1028 struct lio *lio = GET_LIO(netdev);
1029 struct octeon_device *oct = lio->oct_dev;
1030 struct octnic_ctrl_pkt nctrl;
1031 struct netdev_hw_addr *ha;
1034 if (lio->netdev_uc_count == netdev_uc_count(netdev))
1037 if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) {
1038 dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n");
1042 lio->netdev_uc_count = netdev_uc_count(netdev);
1044 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1045 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST;
1046 nctrl.ncmd.s.more = lio->netdev_uc_count;
1047 nctrl.ncmd.s.param1 = oct->vf_num;
1048 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1049 nctrl.netpndev = (u64)netdev;
1050 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1052 /* copy all the addresses into the udd */
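/* Each address goes into its own 8-byte udd word at offset 2, so it lines up
 * with the 48-bit MAC layout used elsewhere for hw_addr.
 */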
1053 mac = &nctrl.udd[0];
1054 netdev_for_each_uc_addr(ha, netdev) {
1055 ether_addr_copy(((u8 *)mac) + 2, ha->addr);
1059 octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1063 * \brief Net device set_multicast_list
1064 * @param netdev network device
1066 static void liquidio_set_mcast_list(struct net_device *netdev)
1068 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1069 struct lio *lio = GET_LIO(netdev);
1070 struct octeon_device *oct = lio->oct_dev;
1071 struct octnic_ctrl_pkt nctrl;
1072 struct netdev_hw_addr *ha;
1076 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1078 /* Create a ctrl pkt command to be sent to core app. */
1080 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1081 nctrl.ncmd.s.param1 = get_new_flags(netdev);
1082 nctrl.ncmd.s.param2 = mc_count;
1083 nctrl.ncmd.s.more = mc_count;
1084 nctrl.netpndev = (u64)netdev;
1085 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1087 /* copy all the addresses into the udd */
1089 netdev_for_each_mc_addr(ha, netdev) {
1091 ether_addr_copy(((u8 *)mc) + 2, ha->addr);
1092 /* no need to swap bytes */
1093 if (++mc > &nctrl.udd[mc_count])
1097 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1099 /* This ndo_set_rx_mode callback runs in atomic context, so we cannot
1100  * sleep here and do not wait for the firmware's response.
1103 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1105 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
1109 liquidio_set_uc_list(netdev);
1113 * \brief Net device set_mac_address
1114 * @param netdev network device
1116 static int liquidio_set_mac(struct net_device *netdev, void *p)
1118 struct sockaddr *addr = (struct sockaddr *)p;
1119 struct lio *lio = GET_LIO(netdev);
1120 struct octeon_device *oct = lio->oct_dev;
1121 struct octnic_ctrl_pkt nctrl;
1124 if (!is_valid_ether_addr(addr->sa_data))
1125 return -EADDRNOTAVAIL;
1127 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
1130 if (lio->linfo.macaddr_is_admin_asgnd)
1133 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1136 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
1137 nctrl.ncmd.s.param1 = 0;
1138 nctrl.ncmd.s.more = 1;
1139 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1140 nctrl.netpndev = (u64)netdev;
1143 /* The MAC Address is presented in network byte order. */
1144 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data);
1146 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1148 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
1152 if (nctrl.sc_status ==
1153 FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) {
1154 dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n");
1158 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1159 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);
1165 liquidio_get_stats64(struct net_device *netdev,
1166 struct rtnl_link_stats64 *lstats)
1168 struct lio *lio = GET_LIO(netdev);
1169 struct octeon_device *oct;
1170 u64 pkts = 0, drop = 0, bytes = 0;
1171 struct oct_droq_stats *oq_stats;
1172 struct oct_iq_stats *iq_stats;
1173 int i, iq_no, oq_no;
1177 if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1180 for (i = 0; i < oct->num_iqs; i++) {
1181 iq_no = lio->linfo.txpciq[i].s.q_no;
1182 iq_stats = &oct->instr_queue[iq_no]->stats;
1183 pkts += iq_stats->tx_done;
1184 drop += iq_stats->tx_dropped;
1185 bytes += iq_stats->tx_tot_bytes;
1188 lstats->tx_packets = pkts;
1189 lstats->tx_bytes = bytes;
1190 lstats->tx_dropped = drop;
1196 for (i = 0; i < oct->num_oqs; i++) {
1197 oq_no = lio->linfo.rxpciq[i].s.q_no;
1198 oq_stats = &oct->droq[oq_no]->stats;
1199 pkts += oq_stats->rx_pkts_received;
1200 drop += (oq_stats->rx_dropped +
1201 oq_stats->dropped_nodispatch +
1202 oq_stats->dropped_toomany +
1203 oq_stats->dropped_nomem);
1204 bytes += oq_stats->rx_bytes_received;
1207 lstats->rx_bytes = bytes;
1208 lstats->rx_packets = pkts;
1209 lstats->rx_dropped = drop;
1211 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
1213 /* detailed rx_errors: */
1214 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
1215 /* received packets with CRC errors */
1216 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
1217 /* received frames with alignment errors */
1218 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
1220 lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
1221 lstats->rx_frame_errors;
1223 /* detailed tx_errors */
1224 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
1225 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
1227 lstats->tx_errors = lstats->tx_aborted_errors +
1228 lstats->tx_carrier_errors;
1232 * \brief Handler for the SIOCSHWTSTAMP ioctl
1233 * @param netdev network device
1234 * @param ifr interface request
1237 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
1239 struct lio *lio = GET_LIO(netdev);
1240 struct hwtstamp_config conf;
1242 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
1248 switch (conf.tx_type) {
1249 case HWTSTAMP_TX_ON:
1250 case HWTSTAMP_TX_OFF:
1256 switch (conf.rx_filter) {
1257 case HWTSTAMP_FILTER_NONE:
1259 case HWTSTAMP_FILTER_ALL:
1260 case HWTSTAMP_FILTER_SOME:
1261 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1262 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1263 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1264 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1265 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1266 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1267 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1268 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1269 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1270 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1271 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1272 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1273 case HWTSTAMP_FILTER_NTP_ALL:
1274 conf.rx_filter = HWTSTAMP_FILTER_ALL;
1280 if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
1281 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
1284 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
1286 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
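/* Every PTP filter above is coarsened to HWTSTAMP_FILTER_ALL, presumably
 * because the NIC timestamps received traffic globally rather than per PTP
 * class.
 */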
1290 * \brief ioctl handler
1291 * @param netdev network device
1292 * @param ifr interface request
1293 * @param cmd command
1295 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1299 return hwtstamp_ioctl(netdev, ifr);
1305 static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
1307 struct sk_buff *skb = (struct sk_buff *)buf;
1308 struct octnet_buf_free_info *finfo;
1309 struct oct_timestamp_resp *resp;
1310 struct octeon_soft_command *sc;
1313 finfo = (struct octnet_buf_free_info *)skb->cb;
1317 resp = (struct oct_timestamp_resp *)sc->virtrptr;
1319 if (status != OCTEON_REQUEST_DONE) {
1320 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
1321 CVM_CAST64(status));
1322 resp->timestamp = 0;
1325 octeon_swap_8B_data(&resp->timestamp, 1);
1327 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
1328 struct skb_shared_hwtstamps ts;
1329 u64 ns = resp->timestamp;
1331 netif_info(lio, tx_done, lio->netdev,
1332 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
1333 skb, (unsigned long long)ns);
1334 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
1335 skb_tstamp_tx(skb, &ts);
1338 octeon_free_soft_command(oct, sc);
1339 tx_buffer_free(skb);
1342 /* \brief Send a data packet that will be timestamped
1343 * @param oct octeon device
1344 * @param ndata pointer to network data
1345 * @param finfo pointer to private network data
1347 static int send_nic_timestamp_pkt(struct octeon_device *oct,
1348 struct octnic_data_pkt *ndata,
1349 struct octnet_buf_free_info *finfo,
1352 struct octeon_soft_command *sc;
1360 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
1361 sizeof(struct oct_timestamp_resp));
1365 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
1366 return IQ_SEND_FAILED;
1369 if (ndata->reqtype == REQTYPE_NORESP_NET)
1370 ndata->reqtype = REQTYPE_RESP_NET;
1371 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
1372 ndata->reqtype = REQTYPE_RESP_NET_SG;
1374 sc->callback = handle_timestamp;
1375 sc->callback_arg = finfo->skb;
1376 sc->iq_no = ndata->q_no;
1378 len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;
1380 ring_doorbell = !xmit_more;
1382 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
1383 sc, len, ndata->reqtype);
1385 if (retval == IQ_SEND_FAILED) {
1386 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
1388 octeon_free_soft_command(oct, sc);
1390 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
1396 /** \brief Transmit network packets to the Octeon interface
1397  * @param skb sk_buff to be transmitted
1398 * @param netdev pointer to network device
1399 * @returns whether the packet was transmitted to the device okay or not
1400 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
1402 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
1404 struct octnet_buf_free_info *finfo;
1405 union octnic_cmd_setup cmdsetup;
1406 struct octnic_data_pkt ndata;
1407 struct octeon_instr_irh *irh;
1408 struct oct_iq_stats *stats;
1409 struct octeon_device *oct;
1410 int q_idx = 0, iq_no = 0;
1411 union tx_info *tx_info;
1419 lio = GET_LIO(netdev);
1422 q_idx = skb_iq(lio->oct_dev, skb);
1424 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
1426 stats = &oct->instr_queue[iq_no]->stats;
1428 /* Check for all conditions in which the current packet cannot be transmitted. */
1431 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
1432 (!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
1433 netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
1434 lio->linfo.link.s.link_up);
1435 goto lio_xmit_failed;
1438 /* Use space in skb->cb to store the info needed to unmap and free the buffers. */
1441 finfo = (struct octnet_buf_free_info *)skb->cb;
1446 /* Prepare the attributes for the data to be passed to OSI. */
1447 memset(&ndata, 0, sizeof(struct octnic_data_pkt));
1453 if (octnet_iq_is_full(oct, ndata.q_no)) {
1454 /* defer sending if queue is full */
1455 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
1457 stats->tx_iq_busy++;
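/* Nothing has been DMA-mapped yet, so returning NETDEV_TX_BUSY simply asks
 * the stack to requeue the skb; no unwinding is needed here.
 */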
1458 return NETDEV_TX_BUSY;
1461 ndata.datasize = skb->len;
1464 cmdsetup.s.iq_no = iq_no;
1466 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1467 if (skb->encapsulation) {
1468 cmdsetup.s.tnl_csum = 1;
1471 cmdsetup.s.transport_csum = 1;
1474 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
1475 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1476 cmdsetup.s.timestamp = 1;
1479 if (!skb_shinfo(skb)->nr_frags) {
1480 cmdsetup.s.u.datasize = skb->len;
1481 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
1482 /* Checksum offload was requested via cmdsetup above; map the linear skb data for DMA */
1483 dptr = dma_map_single(&oct->pci_dev->dev,
1487 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
1488 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
1490 return NETDEV_TX_BUSY;
1493 ndata.cmd.cmd3.dptr = dptr;
1495 ndata.reqtype = REQTYPE_NORESP_NET;
1499 struct octnic_gather *g;
1502 spin_lock(&lio->glist_lock[q_idx]);
1503 g = (struct octnic_gather *)
1504 lio_list_delete_head(&lio->glist[q_idx]);
1505 spin_unlock(&lio->glist_lock[q_idx]);
1508 netif_info(lio, tx_err, lio->netdev,
1509 "Transmit scatter gather: glist null!\n");
1510 goto lio_xmit_failed;
1513 cmdsetup.s.gather = 1;
1514 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
1515 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
1517 memset(g->sg, 0, g->sg_size);
1519 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
1521 (skb->len - skb->data_len),
1523 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
1524 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
1526 return NETDEV_TX_BUSY;
1528 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
1530 frags = skb_shinfo(skb)->nr_frags;
1533 frag = &skb_shinfo(skb)->frags[i - 1];
1535 g->sg[(i >> 2)].ptr[(i & 3)] =
1536 skb_frag_dma_map(&oct->pci_dev->dev,
1537 frag, 0, skb_frag_size(frag),
1539 if (dma_mapping_error(&oct->pci_dev->dev,
1540 g->sg[i >> 2].ptr[i & 3])) {
1541 dma_unmap_single(&oct->pci_dev->dev,
1543 skb->len - skb->data_len,
1545 for (j = 1; j < i; j++) {
1546 frag = &skb_shinfo(skb)->frags[j - 1];
1547 dma_unmap_page(&oct->pci_dev->dev,
1548 g->sg[j >> 2].ptr[j & 3],
1549 skb_frag_size(frag),
1552 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
1554 return NETDEV_TX_BUSY;
1557 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
1562 dptr = g->sg_dma_ptr;
1564 ndata.cmd.cmd3.dptr = dptr;
1568 ndata.reqtype = REQTYPE_NORESP_NET_SG;
1571 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
1572 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
1574 if (skb_shinfo(skb)->gso_size) {
1575 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
1576 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
1579 /* HW insert VLAN tag */
1580 if (skb_vlan_tag_present(skb)) {
1581 irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
1582 irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
1585 xmit_more = netdev_xmit_more();
1587 if (unlikely(cmdsetup.s.timestamp))
1588 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
1590 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
1591 if (status == IQ_SEND_FAILED)
1592 goto lio_xmit_failed;
1594 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
1596 if (status == IQ_SEND_STOP) {
1597 dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
1599 netif_stop_subqueue(netdev, q_idx);
1602 netif_trans_update(netdev);
1604 if (tx_info->s.gso_segs)
1605 stats->tx_done += tx_info->s.gso_segs;
1608 stats->tx_tot_bytes += ndata.datasize;
1610 return NETDEV_TX_OK;
1613 stats->tx_dropped++;
1614 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
1615 iq_no, stats->tx_dropped);
1617 dma_unmap_single(&oct->pci_dev->dev, dptr,
1618 ndata.datasize, DMA_TO_DEVICE);
1620 octeon_ring_doorbell_locked(oct, iq_no);
1622 tx_buffer_free(skb);
1623 return NETDEV_TX_OK;
1626 /** \brief Network device Tx timeout
1627 * @param netdev pointer to network device
1629 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1633 lio = GET_LIO(netdev);
1635 netif_info(lio, tx_err, lio->netdev,
1636 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
1637 netdev->stats.tx_dropped);
1638 netif_trans_update(netdev);
1643 liquidio_vlan_rx_add_vid(struct net_device *netdev,
1644 __be16 proto __attribute__((unused)), u16 vid)
1646 struct lio *lio = GET_LIO(netdev);
1647 struct octeon_device *oct = lio->oct_dev;
1648 struct octnic_ctrl_pkt nctrl;
1651 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1654 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
1655 nctrl.ncmd.s.param1 = vid;
1656 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1657 nctrl.netpndev = (u64)netdev;
1658 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1660 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1662 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
1671 liquidio_vlan_rx_kill_vid(struct net_device *netdev,
1672 __be16 proto __attribute__((unused)), u16 vid)
1674 struct lio *lio = GET_LIO(netdev);
1675 struct octeon_device *oct = lio->oct_dev;
1676 struct octnic_ctrl_pkt nctrl;
1679 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1682 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
1683 nctrl.ncmd.s.param1 = vid;
1684 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1685 nctrl.netpndev = (u64)netdev;
1686 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1688 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1690 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
1698 /** Send a command to enable or disable Rx checksum offload
1699  * @param netdev pointer to network device
1700  * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
1701  * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE or
1702  *               OCTNET_CMD_RXCSUM_DISABLE
1703  * @returns SUCCESS or FAILURE
1705 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
1708 struct lio *lio = GET_LIO(netdev);
1709 struct octeon_device *oct = lio->oct_dev;
1710 struct octnic_ctrl_pkt nctrl;
1713 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1716 nctrl.ncmd.s.cmd = command;
1717 nctrl.ncmd.s.param1 = rx_cmd;
1718 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1719 nctrl.netpndev = (u64)netdev;
1720 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1722 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1724 dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n",
1732 /** Send a command to add or delete a VxLAN UDP port in the firmware
1733 * @param netdev pointer to network device
1734 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
1735 * @param vxlan_port VxLAN port to be added or deleted
1736 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
1737 * OCTNET_CMD_VXLAN_PORT_DEL
1738 * @returns SUCCESS or FAILURE
1740 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
1741 u16 vxlan_port, u8 vxlan_cmd_bit)
1743 struct lio *lio = GET_LIO(netdev);
1744 struct octeon_device *oct = lio->oct_dev;
1745 struct octnic_ctrl_pkt nctrl;
1748 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1751 nctrl.ncmd.s.cmd = command;
1752 nctrl.ncmd.s.more = vxlan_cmd_bit;
1753 nctrl.ncmd.s.param1 = vxlan_port;
1754 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1755 nctrl.netpndev = (u64)netdev;
1756 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1758 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1760 dev_err(&oct->pci_dev->dev,
1761 "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
1769 static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
1770 unsigned int table, unsigned int entry,
1771 struct udp_tunnel_info *ti)
1773 return liquidio_vxlan_port_command(netdev,
1774 OCTNET_CMD_VXLAN_PORT_CONFIG,
1776 OCTNET_CMD_VXLAN_PORT_ADD);
1779 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
1782 struct udp_tunnel_info *ti)
1784 return liquidio_vxlan_port_command(netdev,
1785 OCTNET_CMD_VXLAN_PORT_CONFIG,
1787 OCTNET_CMD_VXLAN_PORT_DEL);
1790 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
1791 .set_port = liquidio_udp_tunnel_set_port,
1792 .unset_port = liquidio_udp_tunnel_unset_port,
1794 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
1798 /** \brief Net device fix features
1799 * @param netdev pointer to network device
1800 * @param request features requested
1801 * @returns updated features list
1803 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
1804 netdev_features_t request)
1806 struct lio *lio = netdev_priv(netdev);
1808 if ((request & NETIF_F_RXCSUM) &&
1809 !(lio->dev_capability & NETIF_F_RXCSUM))
1810 request &= ~NETIF_F_RXCSUM;
1812 if ((request & NETIF_F_HW_CSUM) &&
1813 !(lio->dev_capability & NETIF_F_HW_CSUM))
1814 request &= ~NETIF_F_HW_CSUM;
1816 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
1817 request &= ~NETIF_F_TSO;
1819 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
1820 request &= ~NETIF_F_TSO6;
1822 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
1823 request &= ~NETIF_F_LRO;
1825 /* Disable LRO if RXCSUM is off */
1826 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
1827 (lio->dev_capability & NETIF_F_LRO))
1828 request &= ~NETIF_F_LRO;
1833 /** \brief Net device set features
1834 * @param netdev pointer to network device
1835 * @param features features to enable/disable
1837 static int liquidio_set_features(struct net_device *netdev,
1838 netdev_features_t features)
1840 struct lio *lio = netdev_priv(netdev);
1842 if (!((netdev->features ^ features) & NETIF_F_LRO))
1845 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
1846 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
1847 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
1848 else if (!(features & NETIF_F_LRO) &&
1849 (lio->dev_capability & NETIF_F_LRO))
1850 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
1851 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
1852 if (!(netdev->features & NETIF_F_RXCSUM) &&
1853 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
1854 (features & NETIF_F_RXCSUM))
1855 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
1856 OCTNET_CMD_RXCSUM_ENABLE);
1857 else if ((netdev->features & NETIF_F_RXCSUM) &&
1858 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
1859 !(features & NETIF_F_RXCSUM))
1860 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
1861 OCTNET_CMD_RXCSUM_DISABLE);
1866 static const struct net_device_ops lionetdevops = {
1867 .ndo_open = liquidio_open,
1868 .ndo_stop = liquidio_stop,
1869 .ndo_start_xmit = liquidio_xmit,
1870 .ndo_get_stats64 = liquidio_get_stats64,
1871 .ndo_set_mac_address = liquidio_set_mac,
1872 .ndo_set_rx_mode = liquidio_set_mcast_list,
1873 .ndo_tx_timeout = liquidio_tx_timeout,
1874 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
1875 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
1876 .ndo_change_mtu = liquidio_change_mtu,
1877 .ndo_do_ioctl = liquidio_ioctl,
1878 .ndo_fix_features = liquidio_fix_features,
1879 .ndo_set_features = liquidio_set_features,
1880 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
1881 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
1884 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
1886 struct octeon_device *oct = (struct octeon_device *)buf;
1887 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
1888 union oct_link_status *ls;
1892 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
1893 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
1894 recv_pkt->buffer_size[0],
1895 recv_pkt->rh.r_nic_info.gmxport);
1899 gmxport = recv_pkt->rh.r_nic_info.gmxport;
1900 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
1901 OCT_DROQ_INFO_SIZE);
1903 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
1905 for (i = 0; i < oct->ifcount; i++) {
1906 if (oct->props[i].gmxport == gmxport) {
1907 update_link_status(oct->props[i].netdev, ls);
1913 for (i = 0; i < recv_pkt->buffer_count; i++)
1914 recv_buffer_free(recv_pkt->buffer_ptr[i]);
1915 octeon_free_recv_info(recv_info);
1920 * \brief Setup network interfaces
1921 * @param octeon_dev octeon device
1923 * Called during init time for each device. It assumes the NIC
1924 * is already up and running. The link information for each
1925 * interface is passed in link_info.
1927 static int setup_nic_devices(struct octeon_device *octeon_dev)
1929 int retval, num_iqueues, num_oqueues;
1930 u32 resp_size, data_size;
1931 struct liquidio_if_cfg_resp *resp;
1932 struct octeon_soft_command *sc;
1933 union oct_nic_if_cfg if_cfg;
1934 struct octdev_props *props;
1935 struct net_device *netdev;
1936 struct lio_version *vdata;
1937 struct lio *lio = NULL;
1938 u8 mac[ETH_ALEN], i, j;
1941 ifidx_or_pfnum = octeon_dev->pf_num;
1943 /* This is to handle link status changes */
1944 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
1945 lio_nic_info, octeon_dev);
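/* lio_nic_info() (above) now runs for every OPCODE_NIC/OPCODE_NIC_INFO packet
 * from the firmware; that is the path by which asynchronous link changes
 * reach update_link_status().
 */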
1947 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
1948 * They are handled directly.
1950 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
1953 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
1956 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
1957 free_netsgbuf_with_resp);
1959 for (i = 0; i < octeon_dev->ifcount; i++) {
1960 resp_size = sizeof(struct liquidio_if_cfg_resp);
1961 data_size = sizeof(struct lio_version);
1962 sc = (struct octeon_soft_command *)
1963 octeon_alloc_soft_command(octeon_dev, data_size,
1965 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1966 vdata = (struct lio_version *)sc->virtdptr;
1968 *((u64 *)vdata) = 0;
1969 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
1970 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
1971 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
1975 if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
1976 if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
1977 if_cfg.s.base_queue = 0;
1981 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
1982 OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
1985 init_completion(&sc->complete);
1986 sc->sc_status = OCTEON_REQUEST_PENDING;
1988 retval = octeon_send_soft_command(octeon_dev, sc);
1989 if (retval == IQ_SEND_FAILED) {
1990 dev_err(&octeon_dev->pci_dev->dev,
1991 "iq/oq config failed status: %x\n", retval);
1992 /* Soft instr is freed by driver in case of failure. */
1993 octeon_free_soft_command(octeon_dev, sc);
1997 /* Wait until the completion indicates that the response arrived or the
1998  * request timed out.
2000 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
2004 retval = resp->status;
2006 dev_err(&octeon_dev->pci_dev->dev,
2007 "iq/oq config failed, retval = %d\n", retval);
2008 WRITE_ONCE(sc->caller_is_done, true);
2012 snprintf(octeon_dev->fw_info.liquidio_firmware_version,
2014 resp->cfg_info.liquidio_firmware_version);
2016 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
2017 (sizeof(struct liquidio_if_cfg_info)) >> 3);
2019 num_iqueues = hweight64(resp->cfg_info.iqmask);
2020 num_oqueues = hweight64(resp->cfg_info.oqmask);
2022 if (!(num_iqueues) || !(num_oqueues)) {
2023 dev_err(&octeon_dev->pci_dev->dev,
2024 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
2025 resp->cfg_info.iqmask, resp->cfg_info.oqmask);
2026 WRITE_ONCE(sc->caller_is_done, true);
2027 goto setup_nic_dev_done;
2029 dev_dbg(&octeon_dev->pci_dev->dev,
2030 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
2031 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
2032 num_iqueues, num_oqueues);
2034 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
2037 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
2038 WRITE_ONCE(sc->caller_is_done, true);
2039 goto setup_nic_dev_done;
2042 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
2044 /* Associate the routines that will handle different
2047 netdev->netdev_ops = &lionetdevops;
2049 lio = GET_LIO(netdev);
2051 memset(lio, 0, sizeof(struct lio));
2053 lio->ifidx = ifidx_or_pfnum;
2055 props = &octeon_dev->props[i];
2056 props->gmxport = resp->cfg_info.linfo.gmxport;
2057 props->netdev = netdev;
2059 lio->linfo.num_rxpciq = num_oqueues;
2060 lio->linfo.num_txpciq = num_iqueues;
2062 for (j = 0; j < num_oqueues; j++) {
2063 lio->linfo.rxpciq[j].u64 =
2064 resp->cfg_info.linfo.rxpciq[j].u64;
2066 for (j = 0; j < num_iqueues; j++) {
2067 lio->linfo.txpciq[j].u64 =
2068 resp->cfg_info.linfo.txpciq[j].u64;
2071 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
2072 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
2073 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
2074 lio->linfo.macaddr_is_admin_asgnd =
2075 resp->cfg_info.linfo.macaddr_is_admin_asgnd;
2076 lio->linfo.macaddr_spoofchk =
2077 resp->cfg_info.linfo.macaddr_spoofchk;
2079 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2081 lio->dev_capability = NETIF_F_HIGHDMA
2082 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2083 | NETIF_F_SG | NETIF_F_RXCSUM
2084 | NETIF_F_TSO | NETIF_F_TSO6
2087 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
2089 /* Copy of transmit encapsulation capabilities:
2090 * TSO, TSO6, Checksums for this device
2092 lio->enc_dev_capability = NETIF_F_IP_CSUM
2094 | NETIF_F_GSO_UDP_TUNNEL
2095 | NETIF_F_HW_CSUM | NETIF_F_SG
2097 | NETIF_F_TSO | NETIF_F_TSO6
2100 netdev->hw_enc_features =
2101 (lio->enc_dev_capability & ~NETIF_F_LRO);
2102 netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
2104 netdev->vlan_features = lio->dev_capability;
2105 /* Add any unchangeable hw features */
2106 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
2107 NETIF_F_HW_VLAN_CTAG_RX |
2108 NETIF_F_HW_VLAN_CTAG_TX;
2110 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
2112 netdev->hw_features = lio->dev_capability;
2113 netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2115 /* MTU range: 68 - 16000 */
2116 netdev->min_mtu = LIO_MIN_MTU_SIZE;
2117 netdev->max_mtu = LIO_MAX_MTU_SIZE;
2119 WRITE_ONCE(sc->caller_is_done, true);
2121 /* Point to the properties for octeon device to which this
2122 * interface belongs.
2124 lio->oct_dev = octeon_dev;
2125 lio->octprops = props;
2126 lio->netdev = netdev;
2128 dev_dbg(&octeon_dev->pci_dev->dev,
2129 "if%d gmx: %d hw_addr: 0x%llx\n", i,
2130 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
2132 /* 64-bit swap required on LE machines */
2133 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
2134 for (j = 0; j < ETH_ALEN; j++)
2135 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
2137 /* Copy MAC Address to OS network device structure */
2138 ether_addr_copy(netdev->dev_addr, mac);
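/* After the 64-bit swap, the station address occupies the low six bytes of
 * hw_addr, hence the "+ 2" offset when extracting each byte above.
 */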
2140 if (liquidio_setup_io_queues(octeon_dev, i,
2141 lio->linfo.num_txpciq,
2142 lio->linfo.num_rxpciq)) {
2143 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
2144 goto setup_nic_dev_free;
2147 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
2149 /* For VFs, enable Octeon device interrupts here,
2150 * as this is contingent upon IO queue setup
2152 octeon_dev->fn_list.enable_interrupt(octeon_dev,
2155 /* By default, all interfaces on a single Octeon use the same Tx and Rx queues. */
2158 lio->txq = lio->linfo.txpciq[0].s.q_no;
2159 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
2161 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
2162 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
2164 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
2165 dev_err(&octeon_dev->pci_dev->dev,
2166 "Gather list allocation failed\n");
2167 goto setup_nic_dev_free;
2170 /* Register ethtool support */
2171 liquidio_set_ethtool_ops(netdev);
2172 if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
2173 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
2175 octeon_dev->priv_flags = 0x0;
2177 if (netdev->features & NETIF_F_LRO)
2178 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2179 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2181 if (setup_link_status_change_wq(netdev))
2182 goto setup_nic_dev_free;
2184 if (setup_rx_oom_poll_fn(netdev))
2185 goto setup_nic_dev_free;
2187 /* Register the network device with the OS */
2188 if (register_netdev(netdev)) {
2189 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
2190 goto setup_nic_dev_free;
2193 dev_dbg(&octeon_dev->pci_dev->dev,
2194 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
2195 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2196 netif_carrier_off(netdev);
2197 lio->link_changes++;
2199 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
2201 /* Send a command to the firmware to enable Rx checksum offload by default
2202  * at the time of setup of the LiquidIO driver for this device. */
2205 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2206 OCTNET_CMD_RXCSUM_ENABLE);
2207 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
2208 OCTNET_CMD_TXCSUM_ENABLE);
2210 dev_dbg(&octeon_dev->pci_dev->dev,
2211 "NIC ifidx:%d Setup successful\n", i);
2213 octeon_dev->no_speed_setting = 1;
2221 dev_err(&octeon_dev->pci_dev->dev,
2222 "NIC ifidx:%d Setup failed\n", i);
2223 liquidio_destroy_nic_device(octeon_dev, i);
2232 * \brief initialize the NIC
2233 * @param oct octeon device
2235 * This initialization routine is called once the Octeon device application is ready.
2238 static int liquidio_init_nic_module(struct octeon_device *oct)
2240 int num_nic_ports = 1;
2243 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
2245 /* Only the default IQ and OQ were initialized; initialize the rest as
2246  * well by running the port_config command for each port.
2248 oct->ifcount = num_nic_ports;
2249 memset(oct->props, 0,
2250 sizeof(struct octdev_props) * num_nic_ports);
2252 for (i = 0; i < MAX_OCTEON_LINKS; i++)
2253 oct->props[i].gmxport = -1;
2255 retval = setup_nic_devices(oct);
2257 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
2258 goto octnet_init_failure;
2261 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
2265 octnet_init_failure:
2273 * \brief Device initialization for each Octeon device that is probed
2274 * @param oct octeon device
2276 static int octeon_device_init(struct octeon_device *oct)
2281 atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);
2283 /* Enable access to the octeon device and make its DMA capability
2286 if (octeon_pci_os_setup(oct))
2288 atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);
2290 oct->chip_id = OCTEON_CN23XX_VF_VID;
2291 pci_read_config_dword(oct->pci_dev, 8, &rev_id);
2292 oct->rev_id = rev_id & 0xff;
2294 if (cn23xx_setup_octeon_vf_device(oct))
2297 atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);
2299 oct->app_mode = CVM_DRV_NIC_APP;
2301 /* Initialize the dispatch mechanism used to push packets arriving on
2302 * Octeon Output queues.
2304 if (octeon_init_dispatch_list(oct))
2307 atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);
2309 if (octeon_set_io_queues_off(oct)) {
2310 dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
2314 if (oct->fn_list.setup_device_regs(oct)) {
2315 dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
2319 /* Initialize soft command buffer pool */
2320 if (octeon_setup_sc_buffer_pool(oct)) {
2321 dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
2324 atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
2326 /* Setup the data structures that manage this Octeon's Input queues. */
2327 if (octeon_setup_instr_queues(oct)) {
2328 dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
2331 atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
2333 /* Initialize lists to manage the requests of different types that
2334 * arrive from user & kernel applications for this octeon device.
2336 if (octeon_setup_response_list(oct)) {
2337 dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
2340 atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);
2342 if (octeon_setup_output_queues(oct)) {
2343 dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
2346 atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);
2348 if (oct->fn_list.setup_mbox(oct)) {
2349 dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
2352 atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
2354 if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
2355 dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
2358 atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
2360 dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF: %d ioqs\n",
2361 oct->sriov_info.rings_per_vf);
2363 /* Setup the interrupt handler and record the INT SUM register address */
2364 if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
2367 atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
2369 /* ***************************************************************
2370 * The interrupts need to be enabled for the PF<-->VF handshake.
2371 * They are [re]-enabled after the PF<-->VF handshake so that the
2372 * correct OQ tick value is used (i.e. the value retrieved from
2373 * the PF as part of the handshake).
2376 /* Enable Octeon device interrupts */
2377 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2379 if (cn23xx_octeon_pfvf_handshake(oct))
2382 /* Here we [re]-enable the interrupts so that the correct OQ tick value
2383 * is used (i.e. the value that was retrieved during the handshake)
2386 /* Enable Octeon device interrupts */
2387 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2388 /* *************************************************************** */
2390 /* Enable the input and output queues for this Octeon device */
2391 if (oct->fn_list.enable_io_queues(oct)) {
2392 dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
2396 atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);
2398 atomic_set(&oct->status, OCT_DEV_HOST_OK);
2400 /* Send Credit for Octeon Output queues. Credits are always sent after
2401 * the output queue is enabled.
2403 for (j = 0; j < oct->num_oqs; j++)
2404 writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);
2406 /* Packets can start arriving on the output queues from this point. */
2408 atomic_set(&oct->status, OCT_DEV_CORE_OK);
2410 atomic_set(&oct->status, OCT_DEV_RUNNING);
2412 if (liquidio_init_nic_module(oct))
2418 static int __init liquidio_vf_init(void)
2420 octeon_init_device_list(0);
2421 return pci_register_driver(&liquidio_vf_pci_driver);
2424 static void __exit liquidio_vf_exit(void)
2426 pci_unregister_driver(&liquidio_vf_pci_driver);
2428 pr_info("LiquidIO_VF network module is now unloaded\n");
2431 module_init(liquidio_vf_init);
2432 module_exit(liquidio_vf_exit);