/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
#include "lio_vf_rep.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
		 "Bitmask indicating which consoles have debug output redirected to syslog.");
/**
 * octeon_console_debug_enabled - determines if a given console has debug enabled.
 * @console: console to check
 * Return: 1 = enabled. 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
	return (console_bitmask >> (console)) & 0x1;
}
/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS		1000
/* Update localtime to octeon firmware every 60 seconds so the firmware uses
 * the same time reference; this makes it easy to correlate firmware-logged
 * events/errors with host events when debugging.
 */
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000

/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST	msecs_to_jiffies(1000)
struct lio_trusted_vf_ctx {
	struct completion complete;
	int status;
};

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
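
/* Buffers exchanged with the NIC core application are laid out as 64-bit
 * words in the firmware's native byte order, which is why outbound commands
 * and inbound responses both pass through octeon_swap_8B_data() before the
 * other side's fields are consumed.
 */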
union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};
/* Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
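
/* A GSO super-frame handed to the hardware must fit within the CN23XX input
 * jabber limit, so the payload budget above reserves
 * OCTNIC_GSO_MAX_HEADER_SIZE bytes for the worst-case protocol headers.
 */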
struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};
#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif
static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate);
static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;
static void octeon_droq_bh(struct tasklet_struct *t)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t,
							  droq_tasklet);
	struct octeon_device *oct = oct_priv->dev;

	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
		lio_enable_irq(oct->droq[q_no], NULL);

		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
			/* set time and cnt interrupt thresholds for this DROQ
			 * for NAPI
			 */
			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
			    0x5700000040ULL);
			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
		}
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
	    (struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}
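
/* Each pass of the loop above re-kicks the DROQ tasklet whenever the hardware
 * still shows packets, so in-flight Rx work is consumed before the output
 * queues are torn down rather than dropped; after ~100 ticks the wait gives up.
 */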
/**
 * force_io_queues_off - Forces all IO queues off on a given device
 * @oct: Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}
/**
 * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
 * @oct: Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}
/**
 * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
 * @dev: Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
/**
 * stop_pci_io - Stop all PCI IO to a given device
 * @oct: Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts  */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}
/**
 * liquidio_pcie_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery but only
	 * for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}
/**
 * liquidio_pcie_mmio_enabled - mmio handler
 * @pdev: Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev)
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * liquidio_pcie_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev)
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * liquidio_pcie_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev)
{
	/* Nothing to be done here. */
}
#define liquidio_suspend NULL
#define liquidio_resume NULL
/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled	= liquidio_pcie_mmio_enabled,
	.slot_reset	= liquidio_pcie_slot_reset,
	.resume		= liquidio_pcie_resume,
};
static const struct pci_device_id liquidio_pci_tbl[] = {
	{       /* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume);
static struct pci_driver liquidio_pci_driver = {
	.name		= "LiquidIO",
	.id_table	= liquidio_pci_tbl,
	.probe		= liquidio_probe,
	.remove		= liquidio_remove,
	.err_handler	= &liquidio_err_handler,    /* For AER */
	.driver.pm	= &liquidio_pm_ops,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = liquidio_enable_sriov,
#endif
};
/**
 * liquidio_init_pci - register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * liquidio_deinit_pci - unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}
/**
 * check_txq_status - Check Tx queue status, and take appropriate action
 * @lio: per-network private data
 * Return: 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
	int numqs = lio->netdev->real_num_tx_queues;
	int ret_val = 0;
	int q, iq;

	/* check each sub-queue state */
	for (q = 0; q < numqs; q++) {
		iq = lio->linfo.txpciq[q %
			lio->oct_dev->num_iqs].s.q_no;
		if (octnet_iq_is_full(lio->oct_dev, iq))
			continue;
		if (__netif_subqueue_stopped(lio->netdev, q)) {
			netif_wake_subqueue(lio->netdev, q);
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
						  tx_restart, 1);
			ret_val++;
		}
	}

	return ret_val;
}
/**
 * print_link_info - Print link information
 * @netdev: network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}
/**
 * octnet_link_status_change - Routine to notify MTU change
 * @work: work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	/* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
	 * this API is invoked only when new max-MTU of the interface is
	 * less than current MTU.
	 */
	rtnl_lock();
	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
	rtnl_unlock();
}
/**
 * setup_link_status_change_wq - Sets up the mtu status change work
 * @netdev: network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}
static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}
/**
 * update_link_status - Update link status
 * @netdev: network device
 * @ls: link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);
	int current_max_mtu = lio->linfo.link.s.mtu;
	struct octeon_device *oct = lio->oct_dev;

	dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
		__func__, lio->linfo.link.u64, ls->u64);
	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
			netif_carrier_on(netdev);
			wake_txqs(netdev);
		} else {
			dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
			netif_carrier_off(netdev);
			stop_txqs(netdev);
		}
		if (lio->linfo.link.s.mtu != current_max_mtu) {
			netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
				   current_max_mtu, lio->linfo.link.s.mtu);
			netdev->max_mtu = lio->linfo.link.s.mtu;
		}
		if (lio->linfo.link.s.mtu < netdev->mtu) {
			dev_warn(&oct->pci_dev->dev,
				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
				 netdev->mtu, lio->linfo.link.s.mtu);
			queue_delayed_work(lio->link_status_wq.wq,
					   &lio->link_status_wq.wk.work, 0);
		}
	}
}
/**
 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
 * firmware will correct its time, in case there is a time skew
 *
 * @work: work scheduled to send time update to octeon firmware
 */
static void lio_sync_octeon_time(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct timespec64 ts;
	struct lio_time *lt;
	int ret;

	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: soft command allocation failed\n");
		return;
	}

	lt = (struct lio_time *)sc->virtdptr;

	/* Get time of the day */
	ktime_get_real_ts64(&ts);
	lt->sec = ts.tv_sec;
	lt->nsec = ts.tv_nsec;
	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
	} else {
		WRITE_ONCE(sc->caller_is_done, true);
	}

	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
}
/**
 * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware
 *
 * @netdev: network device which should send time update to firmware
 */
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->sync_octeon_time_wq.wq =
		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
	if (!lio->sync_octeon_time_wq.wq) {
		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
			  lio_sync_octeon_time);
	lio->sync_octeon_time_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

	return 0;
}
/**
 * cleanup_sync_octeon_time_wq - destroy wq
 *
 * @netdev: network device which should send time update to firmware
 *
 * Stop scheduling and destroy the work created to periodically update local
 * time to octeon firmware.
 */
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

	if (time_wq->wq) {
		cancel_delayed_work_sync(&time_wq->wk.work);
		destroy_workqueue(time_wq->wq);
	}
}
static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
	struct octeon_device *other_oct;

	other_oct = lio_get_device(oct->octeon_id + 1);

	if (other_oct && other_oct->pci_dev) {
		int oct_busnum, other_oct_busnum;

		oct_busnum = oct->pci_dev->bus->number;
		other_oct_busnum = other_oct->pci_dev->bus->number;

		if (oct_busnum == other_oct_busnum) {
			int oct_slot, other_oct_slot;

			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

			if (oct_slot == other_oct_slot)
				return other_oct;
		}
	}

	return NULL;
}
static void disable_all_vf_links(struct octeon_device *oct)
{
	struct net_device *netdev;
	int max_vfs, vf, i;

	if (!oct)
		return;

	max_vfs = oct->sriov_info.max_vfs;

	for (i = 0; i < oct->ifcount; i++) {
		netdev = oct->props[i].netdev;
		if (!netdev)
			continue;

		for (vf = 0; vf < max_vfs; vf++)
			liquidio_set_vf_link_state(netdev, vf,
						   IFLA_VF_LINK_STATE_DISABLE);
	}
}
static int liquidio_watchdog(void *param)
{
	bool err_msg_was_printed[LIO_MAX_CORES];
	u16 mask_of_crashed_or_stuck_cores = 0;
	bool all_vf_links_are_disabled = false;
	struct octeon_device *oct = param;
	struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
	long refcount, vfs_referencing_pf;
	u64 vfs_mask1, vfs_mask2;
#endif
	int core;

	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

	while (!kthread_should_stop()) {
		/* sleep for a couple of seconds so that we don't hog the CPU */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(2000));

		mask_of_crashed_or_stuck_cores =
		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

		if (!mask_of_crashed_or_stuck_cores)
			continue;

		WRITE_ONCE(oct->cores_crashed, true);
		other_oct = get_other_octeon_device(oct);
		if (other_oct)
			WRITE_ONCE(other_oct->cores_crashed, true);

		for (core = 0; core < LIO_MAX_CORES; core++) {
			bool core_crashed_or_got_stuck;

			core_crashed_or_got_stuck =
						(mask_of_crashed_or_stuck_cores
						 >> core) & 1;

			if (core_crashed_or_got_stuck &&
			    !err_msg_was_printed[core]) {
				dev_err(&oct->pci_dev->dev,
					"ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
					core);
				err_msg_was_printed[core] = true;
			}
		}

		if (all_vf_links_are_disabled)
			continue;

		disable_all_vf_links(oct);
		disable_all_vf_links(other_oct);
		all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
		vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);

		vfs_referencing_pf  = hweight64(vfs_mask1);
		vfs_referencing_pf += hweight64(vfs_mask2);

		refcount = module_refcount(THIS_MODULE);
		if (refcount >= vfs_referencing_pf) {
			while (vfs_referencing_pf) {
				module_put(THIS_MODULE);
				vfs_referencing_pf--;
			}
		}
#endif
	}

	return 0;
}
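
/* Once cores have crashed, the PF cannot trust its VFs: their links are forced
 * down, and (with module unload support) the references the VF drivers hold on
 * this module are dropped so the PF driver can still be unloaded for recovery.
 */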
/**
 * liquidio_probe - PCI probe handler
 * @pdev: PCI device structure
 * @ent: unused
 */
static int
liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent)
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	/* Enable PTP for 6XXX Device */
	if (((pdev->device == OCTEON_CN66XX) ||
	     (pdev->device == OCTEON_CN68XX)))
		oct_dev->ptp_enable = true;
	else
		oct_dev->ptp_enable = false;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	oct_dev->subsystem_id = pdev->subsystem_vendor |
		(pdev->subsystem_device << 16);

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		complete(&hs->init);
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	if (OCTEON_CN23XX_PF(oct_dev)) {
		u8 bus, device, function;

		if (atomic_read(oct_dev->adapter_refcount) == 1) {
			/* Each NIC gets one watchdog kernel thread. The first
			 * PF (of each NIC) that gets pci_driver->probe()'d
			 * creates that thread.
			 */
			bus = pdev->bus->number;
			device = PCI_SLOT(pdev->devfn);
			function = PCI_FUNC(pdev->devfn);
			oct_dev->watchdog_task = kthread_create(
			    liquidio_watchdog, oct_dev,
			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
			if (!IS_ERR(oct_dev->watchdog_task)) {
				wake_up_process(oct_dev->watchdog_task);
			} else {
				oct_dev->watchdog_task = NULL;
				dev_err(&oct_dev->pci_dev->dev,
					"failed to create kernel_thread\n");
				liquidio_remove(pdev);
				return -1;
			}
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}
static bool fw_type_is_auto(void)
{
	return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
		       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}
/**
 * octeon_pci_flr - PCI FLR for each Octeon device.
 * @oct: octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	int rc;

	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	rc = __pci_reset_function_locked(oct->pci_dev);

	if (rc != 0)
		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
			rc, oct->pf_num);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}
/**
 * octeon_destroy_resources - Destroy resources associated with octeon device
 * @oct: octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i, refcount;
	struct msix_entry *msix_entries;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	struct handshake *hs;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		fallthrough;
	case OCT_DEV_HOST_OK:

	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

		fallthrough;
	case OCT_DEV_IO_QUEUES_DONE:
		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* Force all requests waiting to be fetched by OCTEON to
		 * complete.
		 */
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			struct octeon_instr_queue *iq;

			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			iq = oct->instr_queue[i];

			if (atomic_read(&iq->instr_pending)) {
				spin_lock_bh(&iq->lock);
				iq->fill_cnt = 0;
				iq->octeon_read_index = iq->host_write_index;
				iq->stats.instr_processed +=
					atomic_read(&iq->instr_pending);
				lio_process_iq_request_list(oct, iq, 0);
				spin_unlock_bh(&iq->lock);
			}
		}

		lio_process_ordered_list(oct, 1);
		octeon_free_sc_done_list(oct);
		octeon_free_sc_zombie_list(oct);

		fallthrough;
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts  */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				if (oct->ioq_vector[i].vector) {
					/* clear the affinity_cpumask */
					irq_set_affinity_hint(
							msix_entries[i].vector,
							NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
					oct->ioq_vector[i].vector = 0;
				}
			}
			/* non-iov vector's argument is oct struct */
			free_irq(msix_entries[i].vector, oct);

			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
		} else {
			/* Release the interrupt line */
			free_irq(oct->pci_dev->irq, oct);

			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
		}

		kfree(oct->irq_name_storage);
		oct->irq_name_storage = NULL;

		fallthrough;
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		if (OCTEON_CN23XX_PF(oct))
			octeon_free_ioq_vector(oct);

		fallthrough;
	case OCT_DEV_MBOX_SETUP_DONE:
		if (OCTEON_CN23XX_PF(oct))
			oct->fn_list.free_mbox(oct);

		fallthrough;
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/* Wait for any pending operations */
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		fallthrough;
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		fallthrough;
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}
#ifdef CONFIG_PCI_IOV
		if (oct->sriov_info.sriov_enabled)
			pci_disable_sriov(oct->pci_dev);
#endif
		fallthrough;
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		fallthrough;
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		fallthrough;
	case OCT_DEV_PCI_MAP_DONE:
		refcount = octeon_deregister_device(oct);

		/* Soft reset the octeon device before exiting.
		 * However, if fw was loaded from card (i.e. autoboot),
		 * perform an FLR instead.
		 * Implementation note: only soft-reset the device
		 * if it is a CN6XXX OR the LAST CN23XX device.
		 */
		if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
			octeon_pci_flr(oct);
		else if (OCTEON_CN6XXX(oct) || !refcount)
			oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		fallthrough;
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		fallthrough;
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}                       /* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}
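
/* The switch above is a deliberate fall-through ladder: each case undoes one
 * initialization stage, so entering it at the device's current state unwinds
 * every stage below it in reverse order of setup.
 */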
/**
 * send_rx_ctrl_cmd - Send Rx control command
 * @lio: per-network private data
 * @start_stop: whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int retval;

	if (oct->props[lio->ifidx].rx_on == start_stop)
		return;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
					  16, 0);
	if (!sc) {
		netif_info(lio, rx_err, lio->netdev,
			   "Failed to allocate octeon_soft_command\n");
		return;
	}

	ncmd = (union octnet_cmd *)sc->virtdptr;

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
		octeon_free_soft_command(oct, sc);
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return;

		oct->props[lio->ifidx].rx_on = start_stop;
		WRITE_ONCE(sc->caller_is_done, true);
	}
}
/**
 * liquidio_destroy_nic_device - Destroy NIC device interface
 * @oct: octeon device
 * @ifidx: which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct napi_struct *napi, *n;
	struct lio *lio;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	tasklet_enable(&oct_priv->droq_tasklet);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	cleanup_sync_octeon_time_wq(netdev);
	cleanup_link_status_change_wq(netdev);

	cleanup_rx_oom_poll_fn(netdev);

	lio_delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}
/**
 * liquidio_stop_nic_module - Stop complete NIC functionality
 * @oct: octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
		return 1;
	}

	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	lio_vf_rep_destroy(oct);

	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
		for (j = 0; j < oct->num_oqs; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}

	for (i = 0; i < oct->ifcount; i++)
		liquidio_destroy_nic_device(oct, i);

	if (oct->devlink) {
		devlink_unregister(oct->devlink);
		devlink_free(oct->devlink);
		oct->devlink = NULL;
	}

	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
	return 0;
}
/**
 * liquidio_remove - Cleans up resources at unload time
 * @pdev: PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	if (oct_dev->watchdog_task)
		kthread_stop(oct_dev->watchdog_task);

	if (!oct_dev->octeon_id &&
	    oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
		lio_vf_rep_modexit();

	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
		liquidio_stop_nic_module(oct_dev);

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}
/**
 * octeon_chip_specific_setup - Identify the Octeon device and map the BAR address space
 * @oct: octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
	u32 dev_id, rev_id;
	int ret = 1;

	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	switch (dev_id) {
	case OCTEON_CN68XX_PCIID:
		oct->chip_id = OCTEON_CN68XX;
		ret = lio_setup_cn68xx_octeon_device(oct);
		break;

	case OCTEON_CN66XX_PCIID:
		oct->chip_id = OCTEON_CN66XX;
		ret = lio_setup_cn66xx_octeon_device(oct);
		break;

	case OCTEON_CN23XX_PCIID_PF:
		oct->chip_id = OCTEON_CN23XX_PF_VID;
		ret = setup_cn23xx_octeon_pf_device(oct);
		if (ret)
			break;
#ifdef CONFIG_PCI_IOV
		if (!ret)
			pci_sriov_set_totalvfs(oct->pci_dev,
					       oct->sriov_info.max_vfs);
#endif
		break;

	default:
		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
			dev_id);
	}

	return ret;
}
/**
 * octeon_pci_os_setup - PCI initialization for each Octeon device.
 * @oct: octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
	/* setup PCI stuff first */
	if (pci_enable_device(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
		return 1;
	}

	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
		pci_disable_device(oct->pci_dev);
		return 1;
	}

	/* Enable PCI DMA Master. */
	pci_set_master(oct->pci_dev);

	return 0;
}
/**
 * free_netbuf - Unmap and free network buffer
 * @buf: buffer
 */
static void free_netbuf(void *buf)
{
	struct sk_buff *skb;
	struct octnet_buf_free_info *finfo;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
			 DMA_TO_DEVICE);

	tx_buffer_free(skb);
}
/**
 * free_netsgbuf - Unmap and free gather buffer
 * @buf: buffer
 */
static void free_netsgbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       skb_frag_size(frag), DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);
	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	tx_buffer_free(skb);
}
/**
 * free_netsgbuf_with_resp - Unmap and free gather buffer with response
 * @buf: buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octeon_soft_command *sc;
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       skb_frag_size(frag), DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */
}
/**
 * liquidio_ptp_adjfreq - Adjust ptp frequency
 * @ptp: PTP clock info
 * @ppb: how much to adjust by, in parts-per-billion
 */
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	u64 comp, delta;
	unsigned long flags;
	bool neg_adj = false;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* The hardware adds the clock compensation value to the
	 * PTP clock on every coprocessor clock cycle, so we
	 * compute the delta in terms of coprocessor clocks.
	 */
	delta = (u64)ppb << 32;
	do_div(delta, oct->coproc_clock_rate);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
	if (neg_adj)
		comp -= delta;
	else
		comp += delta;
	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
/**
 * liquidio_ptp_adjtime - Adjust ptp time
 * @ptp: PTP clock info
 * @delta: how much to adjust by, in nanosecs
 */
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio->ptp_adjust += delta;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
/**
 * liquidio_ptp_gettime - Get hardware clock time, including any adjustment
 * @ptp: PTP clock info
 * @ts: timespec
 */
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
				struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_irqsave(&lio->ptp_lock, flags);
	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
	ns += lio->ptp_adjust;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}
/**
 * liquidio_ptp_settime - Set hardware clock time. Reset adjustment
 * @ptp: PTP clock info
 * @ts: timespec
 */
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	ns = timespec64_to_ns(ts);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
	lio->ptp_adjust = 0;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
/**
 * liquidio_ptp_enable - Check if PTP is enabled
 * @ptp: PTP clock info
 * @rq: request
 * @on: is it on
 */
static int
liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp,
		    struct ptp_clock_request __maybe_unused *rq,
		    int __maybe_unused on)
{
	return -EOPNOTSUPP;
}
/**
 * oct_ptp_open - Open PTP clock source
 * @netdev: network device
 */
static void oct_ptp_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_init(&lio->ptp_lock);

	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
	lio->ptp_info.owner = THIS_MODULE;
	lio->ptp_info.max_adj = 250000000;
	lio->ptp_info.n_alarm = 0;
	lio->ptp_info.n_ext_ts = 0;
	lio->ptp_info.n_per_out = 0;
	lio->ptp_info.pps = 0;
	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
	lio->ptp_info.settime64 = liquidio_ptp_settime;
	lio->ptp_info.enable = liquidio_ptp_enable;

	lio->ptp_adjust = 0;

	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
					    &oct->pci_dev->dev);

	if (IS_ERR(lio->ptp_clock))
		lio->ptp_clock = NULL;
}
/**
 * liquidio_ptp_init - Init PTP clock
 * @oct: octeon device
 */
static void liquidio_ptp_init(struct octeon_device *oct)
{
	u64 clock_comp, cfg;

	clock_comp = (u64)NSEC_PER_SEC << 32;
	do_div(clock_comp, oct->coproc_clock_rate);
	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);

	/* Enable */
	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
}
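
/* CN6XXX PTP math: the compensation register holds a 32.32 fixed-point value
 * added to the PTP clock on every coprocessor cycle.  Its nominal value is
 * (NSEC_PER_SEC << 32) / coproc_clock_rate (set above), so the frequency trim
 * of ((u64)ppb << 32) / coproc_clock_rate applied in liquidio_ptp_adjfreq()
 * equals nominal * ppb / 1e9, i.e. an adjustment of ppb parts-per-billion.
 */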
/**
 * load_firmware - Load firmware to device
 * @oct: octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int load_firmware(struct octeon_device *oct)
{
	int ret = 0;
	const struct firmware *fw;
	char fw_name[LIO_MAX_FW_FILENAME_LEN];
	char *tmp_fw_type;

	if (fw_type_is_auto()) {
		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
		strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
	} else {
		tmp_fw_type = fw_type;
	}

	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
		octeon_get_conf(oct)->card_name, tmp_fw_type,
		LIO_FW_NAME_SUFFIX);

	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
			fw_name);
		release_firmware(fw);
		return ret;
	}

	ret = octeon_download_firmware(oct, fw->data, fw->size);

	release_firmware(fw);

	return ret;
}
/**
 * octnet_poll_check_txq_status - Poll routine for checking transmit queue status
 * @work: work_struct data structure
 */
static void octnet_poll_check_txq_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
		return;

	check_txq_status(lio);
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}
/**
 * setup_tx_poll_fn - Sets up the txq poll check
 * @netdev: network device
 */
static inline int setup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
						WQ_MEM_RECLAIM, 0);
	if (!lio->txq_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
			  octnet_poll_check_txq_status);
	lio->txq_status_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
	return 0;
}
static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->txq_status_wq.wq) {
		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
		destroy_workqueue(lio->txq_status_wq.wq);
	}
}
/**
 * liquidio_open - Net device open for LiquidIO
 * @netdev: network device
 */
static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct napi_struct *napi, *n;

	if (oct->props[lio->ifidx].napi_enabled == 0) {
		tasklet_disable(&oct_priv->droq_tasklet);

		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_enable(napi);

		oct->props[lio->ifidx].napi_enabled = 1;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 1;
	}

	if (oct->ptp_enable)
		oct_ptp_open(netdev);

	ifstate_set(lio, LIO_IFSTATE_RUNNING);

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			if (setup_tx_poll_fn(netdev))
				return -1;
	} else {
		if (setup_tx_poll_fn(netdev))
			return -1;
	}

	netif_tx_start_all_queues(netdev);

	/* Ready for link status updates */
	lio->intf_open = 1;

	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");

	/* tell Octeon to start forwarding packets to host */
	send_rx_ctrl_cmd(lio, 1);

	/* start periodical statistics fetch */
	INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
	lio->stats_wk.ctxptr = lio;
	schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
					(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));

	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
		 netdev->name);

	return 0;
}
/**
 * liquidio_stop - Net device stop for LiquidIO
 * @netdev: network device
 */
static int liquidio_stop(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct napi_struct *napi, *n;

	ifstate_reset(lio, LIO_IFSTATE_RUNNING);

	/* Stop any link updates */
	lio->intf_open = 0;

	stop_txqs(netdev);

	/* Inform that netif carrier is down */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	lio->linfo.link.s.link_up = 0;
	lio->link_changes++;

	/* Tell Octeon that nic interface is down. */
	send_rx_ctrl_cmd(lio, 0);

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			cleanup_tx_poll_fn(netdev);
	} else {
		cleanup_tx_poll_fn(netdev);
	}

	cancel_delayed_work_sync(&lio->stats_wk.work);

	if (lio->ptp_clock) {
		ptp_clock_unregister(lio->ptp_clock);
		lio->ptp_clock = NULL;
	}

	/* Wait for any pending Rx descriptors */
	if (lio_wait_for_clean_oq(oct))
		netif_info(lio, rx_err, lio->netdev,
			   "Proceeding with stop interface after partial RX desc processing\n");

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;

		tasklet_enable(&oct_priv->droq_tasklet);
	}

	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

	return 0;
}
/**
 * get_new_flags - Converts a mask based on net device flags
 * @netdev: network device
 *
 * This routine generates an octnet_ifflags mask from the net device flags
 * received from the OS.
 */
static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
{
	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;

	if (netdev->flags & IFF_PROMISC)
		f |= OCTNET_IFFLAG_PROMISC;

	if (netdev->flags & IFF_ALLMULTI)
		f |= OCTNET_IFFLAG_ALLMULTI;

	if (netdev->flags & IFF_MULTICAST) {
		f |= OCTNET_IFFLAG_MULTICAST;

		/* Accept all multicast addresses if there are more than we
		 * can handle
		 */
		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
			f |= OCTNET_IFFLAG_ALLMULTI;
	}

	if (netdev->flags & IFF_BROADCAST)
		f |= OCTNET_IFFLAG_BROADCAST;

	return f;
}
/**
 * liquidio_set_mcast_list - Net device set_multicast_list
 * @netdev: network device
 */
static void liquidio_set_mcast_list(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct netdev_hw_addr *ha;
	u64 *mc;
	int ret;
	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
	nctrl.ncmd.s.param1 = get_new_flags(netdev);
	nctrl.ncmd.s.param2 = mc_count;
	nctrl.ncmd.s.more = mc_count;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mc = &nctrl.udd[0];
	netdev_for_each_mc_addr(ha, netdev) {
		*mc = 0;
		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
		/* no need to swap bytes */

		if (++mc > &nctrl.udd[mc_count])
			break;
	}

	/* Apparently, any activity in this call from the kernel has to
	 * be atomic. So we won't wait for response.
	 */
	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
			ret);
	}
}
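
/* Each 64-bit udd[] word carries one 6-byte MAC address in bytes 2-7, kept in
 * network byte order so no swap is needed; liquidio_set_mac() below uses the
 * same layout when programming the unicast address.
 */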
/**
 * liquidio_set_mac - Net device set_mac_address
 * @netdev: network device
 * @p: pointer to sockaddr
 */
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
	int ret = 0;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct sockaddr *addr = (struct sockaddr *)p;
	struct octnic_ctrl_pkt nctrl;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
		return -ENOMEM;
	}

	if (nctrl.sc_status) {
		dev_err(&oct->pci_dev->dev,
			"%s: MAC Address change failed. sc return=%x\n",
			__func__, nctrl.sc_status);
		return -EIO;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);

	return 0;
}
static void
liquidio_get_stats64(struct net_device *netdev,
		     struct rtnl_link_stats64 *lstats)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct;
	u64 pkts = 0, drop = 0, bytes = 0;
	struct oct_droq_stats *oq_stats;
	struct oct_iq_stats *iq_stats;
	int i, iq_no, oq_no;

	oct = lio->oct_dev;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	for (i = 0; i < oct->num_iqs; i++) {
		iq_no = lio->linfo.txpciq[i].s.q_no;
		iq_stats = &oct->instr_queue[iq_no]->stats;
		pkts += iq_stats->tx_done;
		drop += iq_stats->tx_dropped;
		bytes += iq_stats->tx_tot_bytes;
	}

	lstats->tx_packets = pkts;
	lstats->tx_bytes = bytes;
	lstats->tx_dropped = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < oct->num_oqs; i++) {
		oq_no = lio->linfo.rxpciq[i].s.q_no;
		oq_stats = &oct->droq[oq_no]->stats;
		pkts += oq_stats->rx_pkts_received;
		drop += (oq_stats->rx_dropped +
			 oq_stats->dropped_nodispatch +
			 oq_stats->dropped_toomany +
			 oq_stats->dropped_nomem);
		bytes += oq_stats->rx_bytes_received;
	}

	lstats->rx_bytes = bytes;
	lstats->rx_packets = pkts;
	lstats->rx_dropped = drop;

	lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
	lstats->collisions = oct->link_stats.fromhost.total_collisions;

	/* detailed rx_errors: */
	lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
	/* received packet with crc error */
	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
	/* received frame alignment error */
	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
	/* receiver fifo overrun */
	lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;

	lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
			    lstats->rx_frame_errors + lstats->rx_fifo_errors;

	/* detailed tx_errors */
	lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
	lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
	lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;

	lstats->tx_errors = lstats->tx_aborted_errors +
			    lstats->tx_carrier_errors +
			    lstats->tx_fifo_errors;
}
/**
 * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
 * @netdev: network device
 * @ifr: interface request
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config conf;
	struct lio *lio = GET_LIO(netdev);

	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
		return -EFAULT;

	if (conf.flags)
		return -EINVAL;

	switch (conf.tx_type) {
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	switch (conf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		conf.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
	else
		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
}
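
/* The hardware timestamps either every received packet or none at all, so
 * all of the PTP-specific filters above are coerced to HWTSTAMP_FILTER_ALL
 * and the stack picks out the events it actually cares about.
 */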
/**
 * liquidio_ioctl - ioctl handler
 * @netdev: network device
 * @ifr: interface request
 * @cmd: command
 */
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct lio *lio = GET_LIO(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		if (lio->oct_dev->ptp_enable)
			return hwtstamp_ioctl(netdev, ifr);
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * handle_timestamp - handle a Tx timestamp response
 * @oct: octeon device
 * @status: response status
 * @buf: pointer to skb
 */
static void handle_timestamp(struct octeon_device *oct,
			     u32 status,
			     void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct octeon_soft_command *sc;
	struct oct_timestamp_resp *resp;
	struct lio *lio;
	struct sk_buff *skb = (struct sk_buff *)buf;

	finfo = (struct octnet_buf_free_info *)skb->cb;
	lio = finfo->lio;
	sc = finfo->sc;
	oct = lio->oct_dev;
	resp = (struct oct_timestamp_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_DONE) {
		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(skb);
}
/**
 * send_nic_timestamp_pkt - Send a data packet that will be timestamped
 * @oct: octeon device
 * @ndata: pointer to network data
 * @finfo: pointer to private network data
 * @xmit_more: more is coming
 */
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
					 struct octnic_data_pkt *ndata,
					 struct octnet_buf_free_info *finfo,
					 int xmit_more)
{
	int retval;
	struct octeon_soft_command *sc;
	struct lio *lio;
	int ring_doorbell;
	u32 len;

	lio = finfo->lio;

	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
					    sizeof(struct oct_timestamp_resp));
	finfo->sc = sc;

	if (!sc) {
		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
		return IQ_SEND_FAILED;
	}

	if (ndata->reqtype == REQTYPE_NORESP_NET)
		ndata->reqtype = REQTYPE_RESP_NET;
	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
		ndata->reqtype = REQTYPE_RESP_NET_SG;

	sc->callback = handle_timestamp;
	sc->callback_arg = finfo->skb;
	sc->iq_no = ndata->q_no;

	if (OCTEON_CN23XX_PF(oct))
		len = (u32)((struct octeon_instr_ih3 *)
			    (&sc->cmd.cmd3.ih3))->dlengsz;
	else
		len = (u32)((struct octeon_instr_ih2 *)
			    (&sc->cmd.cmd2.ih2))->dlengsz;

	ring_doorbell = !xmit_more;

	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
				     sc, len, ndata->reqtype);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
			retval);
		octeon_free_soft_command(oct, sc);
	} else {
		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
	}

	return retval;
}
* liquidio_xmit - Transmit network packets to the Octeon interface
* @skb: skbuff struct to be transmitted to the device
2282 * @netdev: pointer to network device
2284 * Return: whether the packet was transmitted to the device okay or not
2285 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
2287 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2290 struct octnet_buf_free_info *finfo;
2291 union octnic_cmd_setup cmdsetup;
2292 struct octnic_data_pkt ndata;
2293 struct octeon_device *oct;
2294 struct oct_iq_stats *stats;
2295 struct octeon_instr_irh *irh;
2296 union tx_info *tx_info;
2298 int q_idx = 0, iq_no = 0;
2299 int j, xmit_more = 0;
2303 lio = GET_LIO(netdev);
2306 q_idx = skb_iq(oct, skb);
2308 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2310 stats = &oct->instr_queue[iq_no]->stats;
/* Check for all conditions in which the current packet cannot be
 * transmitted.
 */
2315 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2316 (!lio->linfo.link.s.link_up) ||
2318 netif_info(lio, tx_err, lio->netdev,
2319 "Transmit failed link_status : %d\n",
2320 lio->linfo.link.s.link_up);
2321 goto lio_xmit_failed;
/* Use space in skb->cb to store info used to unmap and
 * free the buffers.
 */
2327 finfo = (struct octnet_buf_free_info *)skb->cb;
2332 /* Prepare the attributes for the data to be passed to OSI. */
2333 memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2335 ndata.buf = (void *)finfo;
2339 if (octnet_iq_is_full(oct, ndata.q_no)) {
2340 /* defer sending if queue is full */
2341 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2343 stats->tx_iq_busy++;
2344 return NETDEV_TX_BUSY;
/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
 *	lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
 */
2351 ndata.datasize = skb->len;
2354 cmdsetup.s.iq_no = iq_no;
2356 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2357 if (skb->encapsulation) {
2358 cmdsetup.s.tnl_csum = 1;
2361 cmdsetup.s.transport_csum = 1;
2364 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2365 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2366 cmdsetup.s.timestamp = 1;
2369 if (skb_shinfo(skb)->nr_frags == 0) {
2370 cmdsetup.s.u.datasize = skb->len;
2371 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
/* Map the packet's linear data for DMA to the device */
2374 dptr = dma_map_single(&oct->pci_dev->dev,
2378 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2379 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2381 stats->tx_dmamap_fail++;
2382 return NETDEV_TX_BUSY;
2385 if (OCTEON_CN23XX_PF(oct))
2386 ndata.cmd.cmd3.dptr = dptr;
2388 ndata.cmd.cmd2.dptr = dptr;
2390 ndata.reqtype = REQTYPE_NORESP_NET;
2395 struct octnic_gather *g;
2397 spin_lock(&lio->glist_lock[q_idx]);
2398 g = (struct octnic_gather *)
2399 lio_list_delete_head(&lio->glist[q_idx]);
2400 spin_unlock(&lio->glist_lock[q_idx]);
2403 netif_info(lio, tx_err, lio->netdev,
2404 "Transmit scatter gather: glist null!\n");
2405 goto lio_xmit_failed;
2408 cmdsetup.s.gather = 1;
2409 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2410 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2412 memset(g->sg, 0, g->sg_size);
2414 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2416 (skb->len - skb->data_len),
2418 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2419 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2421 stats->tx_dmamap_fail++;
2422 return NETDEV_TX_BUSY;
2424 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2426 frags = skb_shinfo(skb)->nr_frags;
2429 frag = &skb_shinfo(skb)->frags[i - 1];
2431 g->sg[(i >> 2)].ptr[(i & 3)] =
2432 skb_frag_dma_map(&oct->pci_dev->dev,
2433 frag, 0, skb_frag_size(frag),
2436 if (dma_mapping_error(&oct->pci_dev->dev,
2437 g->sg[i >> 2].ptr[i & 3])) {
2438 dma_unmap_single(&oct->pci_dev->dev,
2440 skb->len - skb->data_len,
2442 for (j = 1; j < i; j++) {
2443 frag = &skb_shinfo(skb)->frags[j - 1];
2444 dma_unmap_page(&oct->pci_dev->dev,
2445 g->sg[j >> 2].ptr[j & 3],
2446 skb_frag_size(frag),
2449 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2451 return NETDEV_TX_BUSY;
2454 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
2459 dptr = g->sg_dma_ptr;
2461 if (OCTEON_CN23XX_PF(oct))
2462 ndata.cmd.cmd3.dptr = dptr;
2464 ndata.cmd.cmd2.dptr = dptr;
2468 ndata.reqtype = REQTYPE_NORESP_NET_SG;
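/* Illustrative layout note (assuming the four-pointers-per-entry
 * gather format implied by the i >> 2 / i & 3 arithmetic above):
 * pointer i of the packet lands in g->sg[i >> 2].ptr[i & 3], so an
 * skb with a linear area plus 5 page frags (6 pointers total) fills
 * g->sg[0].ptr[0..3] and g->sg[1].ptr[0..1], with the per-pointer
 * sizes packed alongside by add_sg_size().
 */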
2471 if (OCTEON_CN23XX_PF(oct)) {
2472 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2473 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2475 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2476 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2479 if (skb_shinfo(skb)->gso_size) {
2480 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2481 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2485 /* HW insert VLAN tag */
2486 if (skb_vlan_tag_present(skb)) {
2487 irh->priority = skb_vlan_tag_get(skb) >> 13;
2488 irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
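/* Hedged worked example of the TCI split above: for a tag of 0x6005
 * (binary 011 0 000000000101), >> 13 yields priority 3 and & 0xfff
 * yields VLAN ID 5; the DEI bit (0x1000) is dropped.
 */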
2491 xmit_more = netdev_xmit_more();
2493 if (unlikely(cmdsetup.s.timestamp))
2494 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2496 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2497 if (status == IQ_SEND_FAILED)
2498 goto lio_xmit_failed;
2500 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2502 if (status == IQ_SEND_STOP)
2503 netif_stop_subqueue(netdev, q_idx);
2505 netif_trans_update(netdev);
2507 if (tx_info->s.gso_segs)
2508 stats->tx_done += tx_info->s.gso_segs;
2511 stats->tx_tot_bytes += ndata.datasize;
2513 return NETDEV_TX_OK;
2516 stats->tx_dropped++;
2517 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2518 iq_no, stats->tx_dropped);
2520 dma_unmap_single(&oct->pci_dev->dev, dptr,
2521 ndata.datasize, DMA_TO_DEVICE);
2523 octeon_ring_doorbell_locked(oct, iq_no);
2525 tx_buffer_free(skb);
2526 return NETDEV_TX_OK;
2530 * liquidio_tx_timeout - Network device Tx timeout
2531 * @netdev: pointer to network device
2532 * @txqueue: index of the hung transmit queue
2534 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
2538 lio = GET_LIO(netdev);
2540 netif_info(lio, tx_err, lio->netdev,
2541 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2542 netdev->stats.tx_dropped);
2543 netif_trans_update(netdev);
2547 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2548 __be16 proto __attribute__((unused)),
2551 struct lio *lio = GET_LIO(netdev);
2552 struct octeon_device *oct = lio->oct_dev;
2553 struct octnic_ctrl_pkt nctrl;
2556 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2559 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2560 nctrl.ncmd.s.param1 = vid;
2561 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2562 nctrl.netpndev = (u64)netdev;
2563 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2565 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2567 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2576 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2577 __be16 proto __attribute__((unused)),
2580 struct lio *lio = GET_LIO(netdev);
2581 struct octeon_device *oct = lio->oct_dev;
2582 struct octnic_ctrl_pkt nctrl;
2585 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2588 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2589 nctrl.ncmd.s.param1 = vid;
2590 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2591 nctrl.netpndev = (u64)netdev;
2592 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2594 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2596 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
* liquidio_set_rxcsum_command - Send a command to enable/disable RX checksum offload
2606 * @netdev: pointer to network device
2607 * @command: OCTNET_CMD_TNL_RX_CSUM_CTL
2608 * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE
* Return: SUCCESS or FAILURE
2611 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2614 struct lio *lio = GET_LIO(netdev);
2615 struct octeon_device *oct = lio->oct_dev;
2616 struct octnic_ctrl_pkt nctrl;
2619 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2622 nctrl.ncmd.s.cmd = command;
2623 nctrl.ncmd.s.param1 = rx_cmd;
2624 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2625 nctrl.netpndev = (u64)netdev;
2626 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2628 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2630 dev_err(&oct->pci_dev->dev,
2631 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
* liquidio_vxlan_port_command - Send a command to add/delete a VxLAN UDP port to the firmware
2641 * @netdev: pointer to network device
2642 * @command: OCTNET_CMD_VXLAN_PORT_CONFIG
2643 * @vxlan_port: VxLAN port to be added or deleted
2644 * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD,
2645 * OCTNET_CMD_VXLAN_PORT_DEL
2646 * Return: SUCCESS or FAILURE
2648 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2649 u16 vxlan_port, u8 vxlan_cmd_bit)
2651 struct lio *lio = GET_LIO(netdev);
2652 struct octeon_device *oct = lio->oct_dev;
2653 struct octnic_ctrl_pkt nctrl;
2656 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2659 nctrl.ncmd.s.cmd = command;
2660 nctrl.ncmd.s.more = vxlan_cmd_bit;
2661 nctrl.ncmd.s.param1 = vxlan_port;
2662 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2663 nctrl.netpndev = (u64)netdev;
2664 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2666 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2668 dev_err(&oct->pci_dev->dev,
2669 "VxLAN port add/delete failed in core (ret:0x%x)\n",
2677 static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
2678 unsigned int table, unsigned int entry,
2679 struct udp_tunnel_info *ti)
2681 return liquidio_vxlan_port_command(netdev,
2682 OCTNET_CMD_VXLAN_PORT_CONFIG,
2684 OCTNET_CMD_VXLAN_PORT_ADD);
2687 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
2690 struct udp_tunnel_info *ti)
2692 return liquidio_vxlan_port_command(netdev,
2693 OCTNET_CMD_VXLAN_PORT_CONFIG,
2695 OCTNET_CMD_VXLAN_PORT_DEL);
2698 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
2699 .set_port = liquidio_udp_tunnel_set_port,
2700 .unset_port = liquidio_udp_tunnel_unset_port,
2702 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
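/* Sketch of how the core drives these hooks (illustrative only; the
 * udp_tunnel infrastructure does the real bookkeeping): when the first
 * VXLAN socket binds, the core picks a free slot in table 0 and calls
 * set_port with something like
 *
 *	struct udp_tunnel_info ti = {
 *		.type = UDP_TUNNEL_TYPE_VXLAN,
 *		.port = htons(4789),
 *	};
 *	liquidio_udp_tunnel_set_port(netdev, 0, entry, &ti);
 *
 * where 'entry' is the chosen slot index; the mirror-image unset_port
 * call fires when the last such socket goes away.
 */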
2707 * liquidio_fix_features - Net device fix features
2708 * @netdev: pointer to network device
2709 * @request: features requested
2710 * Return: updated features list
2712 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2713 netdev_features_t request)
2715 struct lio *lio = netdev_priv(netdev);
2717 if ((request & NETIF_F_RXCSUM) &&
2718 !(lio->dev_capability & NETIF_F_RXCSUM))
2719 request &= ~NETIF_F_RXCSUM;
2721 if ((request & NETIF_F_HW_CSUM) &&
2722 !(lio->dev_capability & NETIF_F_HW_CSUM))
2723 request &= ~NETIF_F_HW_CSUM;
2725 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2726 request &= ~NETIF_F_TSO;
2728 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2729 request &= ~NETIF_F_TSO6;
2731 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2732 request &= ~NETIF_F_LRO;
/* Disable LRO if RXCSUM is off */
2735 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2736 (lio->dev_capability & NETIF_F_LRO))
2737 request &= ~NETIF_F_LRO;
2739 if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2740 !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2741 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
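/* Hedged example of the fixup above: turning RXCSUM off while LRO is
 * currently active clears NETIF_F_LRO from the request too, so the
 * core never hands liquidio_set_features() an LRO-without-RXCSUM
 * combination.
 */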
2747 * liquidio_set_features - Net device set features
2748 * @netdev: pointer to network device
2749 * @features: features to enable/disable
2751 static int liquidio_set_features(struct net_device *netdev,
2752 netdev_features_t features)
2754 struct lio *lio = netdev_priv(netdev);
2756 if ((features & NETIF_F_LRO) &&
2757 (lio->dev_capability & NETIF_F_LRO) &&
2758 !(netdev->features & NETIF_F_LRO))
2759 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2760 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2761 else if (!(features & NETIF_F_LRO) &&
2762 (lio->dev_capability & NETIF_F_LRO) &&
2763 (netdev->features & NETIF_F_LRO))
2764 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2765 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
/* Send a command to the firmware to enable/disable RX checksum
 * offload when the setting is changed via ethtool.
 */
2770 if (!(netdev->features & NETIF_F_RXCSUM) &&
2771 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2772 (features & NETIF_F_RXCSUM))
2773 liquidio_set_rxcsum_command(netdev,
2774 OCTNET_CMD_TNL_RX_CSUM_CTL,
2775 OCTNET_CMD_RXCSUM_ENABLE);
2776 else if ((netdev->features & NETIF_F_RXCSUM) &&
2777 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2778 !(features & NETIF_F_RXCSUM))
2779 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2780 OCTNET_CMD_RXCSUM_DISABLE);
2782 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2783 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2784 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2785 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2786 OCTNET_CMD_VLAN_FILTER_ENABLE);
2787 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2788 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2789 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2790 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2791 OCTNET_CMD_VLAN_FILTER_DISABLE);
2796 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2797 u8 *mac, bool is_admin_assigned)
2799 struct lio *lio = GET_LIO(netdev);
2800 struct octeon_device *oct = lio->oct_dev;
2801 struct octnic_ctrl_pkt nctrl;
2804 if (!is_valid_ether_addr(mac))
2807 if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2810 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2813 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2814 /* vfidx is 0 based, but vf_num (param1) is 1 based */
2815 nctrl.ncmd.s.param1 = vfidx + 1;
2816 nctrl.ncmd.s.more = 1;
2817 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2818 nctrl.netpndev = (u64)netdev;
2819 if (is_admin_assigned) {
2820 nctrl.ncmd.s.param2 = true;
2821 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2825 /* The MAC Address is presented in network byte order. */
2826 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
2828 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
2830 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2837 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
2839 struct lio *lio = GET_LIO(netdev);
2840 struct octeon_device *oct = lio->oct_dev;
2843 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2846 retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2848 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
2853 static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx,
2856 struct lio *lio = GET_LIO(netdev);
2857 struct octeon_device *oct = lio->oct_dev;
2858 struct octnic_ctrl_pkt nctrl;
2861 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) {
2862 netif_info(lio, drv, lio->netdev,
2863 "firmware does not support spoofchk\n");
2867 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
2868 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
2873 if (oct->sriov_info.vf_spoofchk[vfidx])
2877 if (!oct->sriov_info.vf_spoofchk[vfidx])
2881 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2882 nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1;
2883 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
/* vfidx is 0 based, but vf_num (param1) is 1 based */
nctrl.ncmd.s.param1 = vfidx + 1;
2888 nctrl.ncmd.s.param2 = enable;
2889 nctrl.ncmd.s.more = 0;
2890 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2893 retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2896 netif_info(lio, drv, lio->netdev,
2897 "Failed to set VF %d spoofchk %s\n", vfidx,
2898 enable ? "on" : "off");
2902 oct->sriov_info.vf_spoofchk[vfidx] = enable;
2903 netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx,
2904 enable ? "on" : "off");
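/* Usage sketch (illustrative): the handler above is reached via
 *
 *	ip link set dev eth0 vf 0 spoofchk on
 *
 * where "eth0" is a placeholder for the PF netdev name.
 */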
2909 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
2910 u16 vlan, u8 qos, __be16 vlan_proto)
2912 struct lio *lio = GET_LIO(netdev);
2913 struct octeon_device *oct = lio->oct_dev;
2914 struct octnic_ctrl_pkt nctrl;
2918 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2921 if (vlan_proto != htons(ETH_P_8021Q))
2922 return -EPROTONOSUPPORT;
2924 if (vlan >= VLAN_N_VID || qos > 7)
2928 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
2932 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
2935 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2938 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2940 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2942 nctrl.ncmd.s.param1 = vlantci;
2943 nctrl.ncmd.s.param2 =
2944 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
2945 nctrl.ncmd.s.more = 0;
2946 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2949 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2956 oct->sriov_info.vf_vlantci[vfidx] = vlantci;
2961 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
2962 struct ifla_vf_info *ivi)
2964 struct lio *lio = GET_LIO(netdev);
2965 struct octeon_device *oct = lio->oct_dev;
2968 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2971 memset(ivi, 0, sizeof(struct ifla_vf_info));
2974 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
2975 ether_addr_copy(&ivi->mac[0], macaddr);
2976 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
2977 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
2978 if (oct->sriov_info.trusted_vf.active &&
2979 oct->sriov_info.trusted_vf.id == vfidx)
2980 ivi->trusted = true;
2982 ivi->trusted = false;
2983 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
2984 ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx];
2985 ivi->max_tx_rate = lio->linfo.link.s.speed;
2986 ivi->min_tx_rate = 0;
2991 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
2993 struct octeon_device *oct = lio->oct_dev;
2994 struct octeon_soft_command *sc;
2997 sc = octeon_alloc_soft_command(oct, 0, 16, 0);
3001 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
3003 /* vfidx is 0 based, but vf_num (param1) is 1 based */
3004 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
3005 OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
3008 init_completion(&sc->complete);
3009 sc->sc_status = OCTEON_REQUEST_PENDING;
3011 retval = octeon_send_soft_command(oct, sc);
3012 if (retval == IQ_SEND_FAILED) {
3013 octeon_free_soft_command(oct, sc);
3016 /* Wait for response or timeout */
3017 retval = wait_for_sc_completion_timeout(oct, sc, 0);
3021 WRITE_ONCE(sc->caller_is_done, true);
3027 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
3030 struct lio *lio = GET_LIO(netdev);
3031 struct octeon_device *oct = lio->oct_dev;
3033 if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3034 /* trusted vf is not supported by firmware older than 1.7.1 */
3038 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3039 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3046 if (oct->sriov_info.trusted_vf.active &&
3047 oct->sriov_info.trusted_vf.id == vfidx)
3050 if (oct->sriov_info.trusted_vf.active) {
netif_info(lio, drv, lio->netdev, "Only one trusted VF is allowed\n");
3057 if (!oct->sriov_info.trusted_vf.active)
3061 if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3063 oct->sriov_info.trusted_vf.id = vfidx;
3064 oct->sriov_info.trusted_vf.active = true;
3066 oct->sriov_info.trusted_vf.active = false;
3069 netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3070 setting ? "" : "not ");
3072 netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3079 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3082 struct lio *lio = GET_LIO(netdev);
3083 struct octeon_device *oct = lio->oct_dev;
3084 struct octnic_ctrl_pkt nctrl;
3087 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3090 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3093 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3094 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3095 nctrl.ncmd.s.param1 =
3096 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3097 nctrl.ncmd.s.param2 = linkstate;
3098 nctrl.ncmd.s.more = 0;
3099 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3102 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
3105 oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3113 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3115 struct lio_devlink_priv *priv;
3116 struct octeon_device *oct;
3118 priv = devlink_priv(devlink);
3121 *mode = oct->eswitch_mode;
3127 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
3128 struct netlink_ext_ack *extack)
3130 struct lio_devlink_priv *priv;
3131 struct octeon_device *oct;
3134 priv = devlink_priv(devlink);
3137 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3140 if (oct->eswitch_mode == mode)
3144 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3145 oct->eswitch_mode = mode;
3146 ret = lio_vf_rep_create(oct);
3149 case DEVLINK_ESWITCH_MODE_LEGACY:
3150 lio_vf_rep_destroy(oct);
3151 oct->eswitch_mode = mode;
3161 static const struct devlink_ops liquidio_devlink_ops = {
3162 .eswitch_mode_get = liquidio_eswitch_mode_get,
3163 .eswitch_mode_set = liquidio_eswitch_mode_set,
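/* Usage sketch (illustrative): once these ops are registered, the mode
 * is flipped from userspace via, e.g.,
 *
 *	devlink dev eswitch set pci/0000:03:00.0 mode switchdev
 *
 * which lands in liquidio_eswitch_mode_set() and creates the VF
 * representors; "pci/0000:03:00.0" is a placeholder device handle.
 */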
3167 liquidio_get_port_parent_id(struct net_device *dev,
3168 struct netdev_phys_item_id *ppid)
3170 struct lio *lio = GET_LIO(dev);
3171 struct octeon_device *oct = lio->oct_dev;
3173 if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3176 ppid->id_len = ETH_ALEN;
3177 ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
3182 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
3183 struct ifla_vf_stats *vf_stats)
3185 struct lio *lio = GET_LIO(netdev);
3186 struct octeon_device *oct = lio->oct_dev;
3187 struct oct_vf_stats stats;
3190 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3193 memset(&stats, 0, sizeof(struct oct_vf_stats));
3194 ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3196 vf_stats->rx_packets = stats.rx_packets;
3197 vf_stats->tx_packets = stats.tx_packets;
3198 vf_stats->rx_bytes = stats.rx_bytes;
3199 vf_stats->tx_bytes = stats.tx_bytes;
3200 vf_stats->broadcast = stats.broadcast;
3201 vf_stats->multicast = stats.multicast;
3207 static const struct net_device_ops lionetdevops = {
3208 .ndo_open = liquidio_open,
3209 .ndo_stop = liquidio_stop,
3210 .ndo_start_xmit = liquidio_xmit,
3211 .ndo_get_stats64 = liquidio_get_stats64,
3212 .ndo_set_mac_address = liquidio_set_mac,
3213 .ndo_set_rx_mode = liquidio_set_mcast_list,
3214 .ndo_tx_timeout = liquidio_tx_timeout,
3216 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
3217 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
3218 .ndo_change_mtu = liquidio_change_mtu,
3219 .ndo_do_ioctl = liquidio_ioctl,
3220 .ndo_fix_features = liquidio_fix_features,
3221 .ndo_set_features = liquidio_set_features,
3222 .ndo_set_vf_mac = liquidio_set_vf_mac,
3223 .ndo_set_vf_vlan = liquidio_set_vf_vlan,
3224 .ndo_get_vf_config = liquidio_get_vf_config,
3225 .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk,
3226 .ndo_set_vf_trust = liquidio_set_vf_trust,
3227 .ndo_set_vf_link_state = liquidio_set_vf_link_state,
3228 .ndo_get_vf_stats = liquidio_get_vf_stats,
3229 .ndo_get_port_parent_id = liquidio_get_port_parent_id,
3233 * liquidio_init - Entry point for the liquidio module
3235 static int __init liquidio_init(void)
3238 struct handshake *hs;
3240 init_completion(&first_stage);
3242 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3244 if (liquidio_init_pci())
3247 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3249 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3252 wait_for_completion(&hs->init);
3254 /* init handshake failed */
3255 dev_err(&hs->pci_dev->dev,
3256 "Failed to init device\n");
3257 liquidio_deinit_pci();
3263 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3266 wait_for_completion_timeout(&hs->started,
3267 msecs_to_jiffies(30000));
3268 if (!hs->started_ok) {
3269 /* starter handshake failed */
3270 dev_err(&hs->pci_dev->dev,
3271 "Firmware failed to start\n");
3272 liquidio_deinit_pci();
3281 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3283 struct octeon_device *oct = (struct octeon_device *)buf;
3284 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3286 union oct_link_status *ls;
3289 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3290 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3291 recv_pkt->buffer_size[0],
3292 recv_pkt->rh.r_nic_info.gmxport);
3296 gmxport = recv_pkt->rh.r_nic_info.gmxport;
3297 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3298 OCT_DROQ_INFO_SIZE);
3300 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3301 for (i = 0; i < oct->ifcount; i++) {
3302 if (oct->props[i].gmxport == gmxport) {
3303 update_link_status(oct->props[i].netdev, ls);
3309 for (i = 0; i < recv_pkt->buffer_count; i++)
3310 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3311 octeon_free_recv_info(recv_info);
3316 * setup_nic_devices - Setup network interfaces
3317 * @octeon_dev: octeon device
3319 * Called during init time for each device. It assumes the NIC
3320 * is already up and running. The link information for each
3321 * interface is passed in link_info.
3323 static int setup_nic_devices(struct octeon_device *octeon_dev)
3325 struct lio *lio = NULL;
3326 struct net_device *netdev;
3327 u8 mac[6], i, j, *fw_ver, *micro_ver;
3328 unsigned long micro;
3330 struct octeon_soft_command *sc;
3331 struct liquidio_if_cfg_resp *resp;
3332 struct octdev_props *props;
3333 int retval, num_iqueues, num_oqueues;
3334 int max_num_queues = 0;
3335 union oct_nic_if_cfg if_cfg;
3336 unsigned int base_queue;
3337 unsigned int gmx_port_id;
3338 u32 resp_size, data_size;
3340 struct lio_version *vdata;
3341 struct devlink *devlink;
3342 struct lio_devlink_priv *lio_devlink;
3344 /* This is to handle link status changes */
3345 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3347 lio_nic_info, octeon_dev);
/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
 * They are handled directly.
 */
3352 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3355 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3358 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3359 free_netsgbuf_with_resp);
3361 for (i = 0; i < octeon_dev->ifcount; i++) {
3362 resp_size = sizeof(struct liquidio_if_cfg_resp);
3363 data_size = sizeof(struct lio_version);
3364 sc = (struct octeon_soft_command *)
3365 octeon_alloc_soft_command(octeon_dev, data_size,
3367 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3368 vdata = (struct lio_version *)sc->virtdptr;
3370 *((u64 *)vdata) = 0;
3371 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3372 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3373 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3375 if (OCTEON_CN23XX_PF(octeon_dev)) {
3376 num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3377 num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3378 base_queue = octeon_dev->sriov_info.pf_srn;
3380 gmx_port_id = octeon_dev->pf_num;
3381 ifidx_or_pfnum = octeon_dev->pf_num;
3383 num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3384 octeon_get_conf(octeon_dev), i);
3385 num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3386 octeon_get_conf(octeon_dev), i);
3387 base_queue = CFG_GET_BASE_QUE_NIC_IF(
3388 octeon_get_conf(octeon_dev), i);
3389 gmx_port_id = CFG_GET_GMXID_NIC_IF(
3390 octeon_get_conf(octeon_dev), i);
3394 dev_dbg(&octeon_dev->pci_dev->dev,
3395 "requesting config for interface %d, iqs %d, oqs %d\n",
3396 ifidx_or_pfnum, num_iqueues, num_oqueues);
3399 if_cfg.s.num_iqueues = num_iqueues;
3400 if_cfg.s.num_oqueues = num_oqueues;
3401 if_cfg.s.base_queue = base_queue;
3402 if_cfg.s.gmx_port_id = gmx_port_id;
3406 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3407 OPCODE_NIC_IF_CFG, 0,
3410 init_completion(&sc->complete);
3411 sc->sc_status = OCTEON_REQUEST_PENDING;
3413 retval = octeon_send_soft_command(octeon_dev, sc);
3414 if (retval == IQ_SEND_FAILED) {
3415 dev_err(&octeon_dev->pci_dev->dev,
3416 "iq/oq config failed status: %x\n",
3418 /* Soft instr is freed by driver in case of failure. */
3419 octeon_free_soft_command(octeon_dev, sc);
/* Sleep on a wait queue until the cond flag indicates that the
 * response arrived or the request timed out.
 */
3426 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
3430 retval = resp->status;
3432 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3433 WRITE_ONCE(sc->caller_is_done, true);
3434 goto setup_nic_dev_done;
3436 snprintf(octeon_dev->fw_info.liquidio_firmware_version,
3438 resp->cfg_info.liquidio_firmware_version);
3440 /* Verify f/w version (in case of 'auto' loading from flash) */
3441 fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3442 if (memcmp(LIQUIDIO_BASE_VERSION,
3444 strlen(LIQUIDIO_BASE_VERSION))) {
3445 dev_err(&octeon_dev->pci_dev->dev,
3446 "Unmatched firmware version. Expected %s.x, got %s.\n",
3447 LIQUIDIO_BASE_VERSION, fw_ver);
3448 WRITE_ONCE(sc->caller_is_done, true);
3449 goto setup_nic_dev_done;
3450 } else if (atomic_read(octeon_dev->adapter_fw_state) ==
3452 dev_info(&octeon_dev->pci_dev->dev,
3453 "Using auto-loaded firmware version %s.\n",
3457 /* extract micro version field; point past '<maj>.<min>.' */
3458 micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
if (kstrtoul(micro_ver, 10, &micro) != 0)
3461 octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
3462 octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
3463 octeon_dev->fw_info.ver.rev = micro;
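/* Worked example (version strings illustrative): with a base version
 * of "1.7" and firmware reporting "1.7.2", micro_ver points at "2",
 * so ver.maj = 1, ver.min = 7 and ver.rev = 2.
 */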
3465 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3466 (sizeof(struct liquidio_if_cfg_info)) >> 3);
3468 num_iqueues = hweight64(resp->cfg_info.iqmask);
3469 num_oqueues = hweight64(resp->cfg_info.oqmask);
3471 if (!(num_iqueues) || !(num_oqueues)) {
3472 dev_err(&octeon_dev->pci_dev->dev,
3473 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3474 resp->cfg_info.iqmask,
3475 resp->cfg_info.oqmask);
3476 WRITE_ONCE(sc->caller_is_done, true);
3477 goto setup_nic_dev_done;
3480 if (OCTEON_CN6XXX(octeon_dev)) {
3481 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3483 } else if (OCTEON_CN23XX_PF(octeon_dev)) {
3484 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3488 dev_dbg(&octeon_dev->pci_dev->dev,
3489 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3490 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3491 num_iqueues, num_oqueues, max_num_queues);
3492 netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3495 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3496 WRITE_ONCE(sc->caller_is_done, true);
3497 goto setup_nic_dev_done;
3500 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3502 /* Associate the routines that will handle different
3505 netdev->netdev_ops = &lionetdevops;
3507 retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3509 dev_err(&octeon_dev->pci_dev->dev,
3510 "setting real number rx failed\n");
3511 WRITE_ONCE(sc->caller_is_done, true);
3512 goto setup_nic_dev_free;
3515 retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3517 dev_err(&octeon_dev->pci_dev->dev,
3518 "setting real number tx failed\n");
3519 WRITE_ONCE(sc->caller_is_done, true);
3520 goto setup_nic_dev_free;
3523 lio = GET_LIO(netdev);
3525 memset(lio, 0, sizeof(struct lio));
3527 lio->ifidx = ifidx_or_pfnum;
3529 props = &octeon_dev->props[i];
3530 props->gmxport = resp->cfg_info.linfo.gmxport;
3531 props->netdev = netdev;
3533 lio->linfo.num_rxpciq = num_oqueues;
3534 lio->linfo.num_txpciq = num_iqueues;
3535 for (j = 0; j < num_oqueues; j++) {
3536 lio->linfo.rxpciq[j].u64 =
3537 resp->cfg_info.linfo.rxpciq[j].u64;
3539 for (j = 0; j < num_iqueues; j++) {
3540 lio->linfo.txpciq[j].u64 =
3541 resp->cfg_info.linfo.txpciq[j].u64;
3543 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3544 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3545 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3547 WRITE_ONCE(sc->caller_is_done, true);
3549 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3551 if (OCTEON_CN23XX_PF(octeon_dev) ||
3552 OCTEON_CN6XXX(octeon_dev)) {
3553 lio->dev_capability = NETIF_F_HIGHDMA
3556 | NETIF_F_SG | NETIF_F_RXCSUM
3558 | NETIF_F_TSO | NETIF_F_TSO6
3561 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
/* Copy of transmit encapsulation capabilities:
 * TSO, TSO6, Checksums for this device
 */
3566 lio->enc_dev_capability = NETIF_F_IP_CSUM
3568 | NETIF_F_GSO_UDP_TUNNEL
3569 | NETIF_F_HW_CSUM | NETIF_F_SG
3571 | NETIF_F_TSO | NETIF_F_TSO6
3574 netdev->hw_enc_features = (lio->enc_dev_capability &
3577 netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
3579 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3581 netdev->vlan_features = lio->dev_capability;
3582 /* Add any unchangeable hw features */
3583 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
3584 NETIF_F_HW_VLAN_CTAG_RX |
3585 NETIF_F_HW_VLAN_CTAG_TX;
3587 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3589 netdev->hw_features = lio->dev_capability;
/* HW_VLAN_RX and HW_VLAN_FILTER are always on */
3591 netdev->hw_features = netdev->hw_features &
3592 ~NETIF_F_HW_VLAN_CTAG_RX;
3594 /* MTU range: 68 - 16000 */
3595 netdev->min_mtu = LIO_MIN_MTU_SIZE;
3596 netdev->max_mtu = LIO_MAX_MTU_SIZE;
/* Point to the properties for the octeon device to which this
 * interface belongs.
 */
3601 lio->oct_dev = octeon_dev;
3602 lio->octprops = props;
3603 lio->netdev = netdev;
3605 dev_dbg(&octeon_dev->pci_dev->dev,
3606 "if%d gmx: %d hw_addr: 0x%llx\n", i,
3607 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3609 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3612 eth_random_addr(vfmac);
3613 if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
3614 dev_err(&octeon_dev->pci_dev->dev,
3615 "Error setting VF%d MAC address\n",
3617 goto setup_nic_dev_free;
3621 /* 64-bit swap required on LE machines */
3622 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3623 for (j = 0; j < 6; j++)
3624 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
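/* Byte-layout example (values illustrative): the MAC is bytes 2..7 of
 * the 8-byte hw_addr as it sits in memory after the swap, so in-memory
 * bytes 00 00 02 03 04 05 06 07 yield mac 02:03:04:05:06:07.
 */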
3626 /* Copy MAC Address to OS network device structure */
3628 ether_addr_copy(netdev->dev_addr, mac);
/* By default all interfaces on a single Octeon use the same
 * tx and rx queues.
 */
3633 lio->txq = lio->linfo.txpciq[0].s.q_no;
3634 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3635 if (liquidio_setup_io_queues(octeon_dev, i,
3636 lio->linfo.num_txpciq,
3637 lio->linfo.num_rxpciq)) {
3638 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3639 goto setup_nic_dev_free;
3642 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3644 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3645 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3647 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3648 dev_err(&octeon_dev->pci_dev->dev,
3649 "Gather list allocation failed\n");
3650 goto setup_nic_dev_free;
3653 /* Register ethtool support */
3654 liquidio_set_ethtool_ops(netdev);
3655 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3656 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3658 octeon_dev->priv_flags = 0x0;
3660 if (netdev->features & NETIF_F_LRO)
3661 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3662 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3664 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3665 OCTNET_CMD_VLAN_FILTER_ENABLE);
3667 if ((debug != -1) && (debug & NETIF_MSG_HW))
3668 liquidio_set_feature(netdev,
3669 OCTNET_CMD_VERBOSE_ENABLE, 0);
3671 if (setup_link_status_change_wq(netdev))
3672 goto setup_nic_dev_free;
3674 if ((octeon_dev->fw_info.app_cap_flags &
3675 LIQUIDIO_TIME_SYNC_CAP) &&
3676 setup_sync_octeon_time_wq(netdev))
3677 goto setup_nic_dev_free;
3679 if (setup_rx_oom_poll_fn(netdev))
3680 goto setup_nic_dev_free;
3682 /* Register the network device with the OS */
3683 if (register_netdev(netdev)) {
3684 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3685 goto setup_nic_dev_free;
3688 dev_dbg(&octeon_dev->pci_dev->dev,
3689 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3690 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3691 netif_carrier_off(netdev);
3692 lio->link_changes++;
3694 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
/* Send a command to the firmware to enable Rx checksum offload
 * by default at the time of LiquidIO driver setup for
 * this device.
 */
3700 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3701 OCTNET_CMD_RXCSUM_ENABLE);
3702 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3703 OCTNET_CMD_TXCSUM_ENABLE);
3705 dev_dbg(&octeon_dev->pci_dev->dev,
3706 "NIC ifidx:%d Setup successful\n", i);
3708 if (octeon_dev->subsystem_id ==
3709 OCTEON_CN2350_25GB_SUBSYS_ID ||
3710 octeon_dev->subsystem_id ==
3711 OCTEON_CN2360_25GB_SUBSYS_ID) {
3712 cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
3713 octeon_dev->fw_info.ver.min,
3714 octeon_dev->fw_info.ver.rev);
3716 /* speed control unsupported in f/w older than 1.7.2 */
3717 if (cur_ver < OCT_FW_VER(1, 7, 2)) {
3718 dev_info(&octeon_dev->pci_dev->dev,
3719 "speed setting not supported by f/w.");
3720 octeon_dev->speed_setting = 25;
3721 octeon_dev->no_speed_setting = 1;
3723 liquidio_get_speed(lio);
3726 if (octeon_dev->speed_setting == 0) {
3727 octeon_dev->speed_setting = 25;
3728 octeon_dev->no_speed_setting = 1;
3731 octeon_dev->no_speed_setting = 1;
3732 octeon_dev->speed_setting = 10;
3734 octeon_dev->speed_boot = octeon_dev->speed_setting;
3736 /* don't read FEC setting if unsupported by f/w (see above) */
3737 if (octeon_dev->speed_boot == 25 &&
3738 !octeon_dev->no_speed_setting) {
3739 liquidio_get_fec(lio);
3740 octeon_dev->props[lio->ifidx].fec_boot =
3741 octeon_dev->props[lio->ifidx].fec;
3745 devlink = devlink_alloc(&liquidio_devlink_ops,
3746 sizeof(struct lio_devlink_priv));
3748 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3749 goto setup_nic_dev_free;
3752 lio_devlink = devlink_priv(devlink);
3753 lio_devlink->oct = octeon_dev;
3755 if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
3756 devlink_free(devlink);
3757 dev_err(&octeon_dev->pci_dev->dev,
3758 "devlink registration failed\n");
3759 goto setup_nic_dev_free;
3762 octeon_dev->devlink = devlink;
3763 octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3770 dev_err(&octeon_dev->pci_dev->dev,
3771 "NIC ifidx:%d Setup failed\n", i);
3772 liquidio_destroy_nic_device(octeon_dev, i);
3780 #ifdef CONFIG_PCI_IOV
3781 static int octeon_enable_sriov(struct octeon_device *oct)
3783 unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3784 struct pci_dev *vfdev;
3788 if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3789 err = pci_enable_sriov(oct->pci_dev,
3790 oct->sriov_info.num_vfs_alloced);
3792 dev_err(&oct->pci_dev->dev,
3793 "OCTEON: Failed to enable PCI sriov: %d\n",
3795 oct->sriov_info.num_vfs_alloced = 0;
3798 oct->sriov_info.sriov_enabled = 1;
/* init lookup table that maps DPI ring number to VF pci_dev
 * struct pointer
 */
3804 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3805 OCTEON_CN23XX_VF_VID, NULL);
3807 if (vfdev->is_virtfn &&
3808 (vfdev->physfn == oct->pci_dev)) {
3809 oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3811 u += oct->sriov_info.rings_per_vf;
3813 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3814 OCTEON_CN23XX_VF_VID, vfdev);
3818 return num_vfs_alloced;
3821 static int lio_pci_sriov_disable(struct octeon_device *oct)
3825 if (pci_vfs_assigned(oct->pci_dev)) {
3826 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3830 pci_disable_sriov(oct->pci_dev);
3833 while (u < MAX_POSSIBLE_VFS) {
3834 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3835 u += oct->sriov_info.rings_per_vf;
3838 oct->sriov_info.num_vfs_alloced = 0;
3839 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3845 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3847 struct octeon_device *oct = pci_get_drvdata(dev);
3850 if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3851 (oct->sriov_info.sriov_enabled)) {
3852 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3853 oct->pf_num, num_vfs);
3858 lio_vf_rep_destroy(oct);
3859 ret = lio_pci_sriov_disable(oct);
3860 } else if (num_vfs > oct->sriov_info.max_vfs) {
3861 dev_err(&oct->pci_dev->dev,
3862 "OCTEON: Max allowed VFs:%d user requested:%d",
3863 oct->sriov_info.max_vfs, num_vfs);
3866 oct->sriov_info.num_vfs_alloced = num_vfs;
3867 ret = octeon_enable_sriov(oct);
3868 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3869 oct->pf_num, num_vfs);
3870 ret = lio_vf_rep_create(oct);
3872 dev_info(&oct->pci_dev->dev,
3873 "vf representor create failed");
3881 * liquidio_init_nic_module - initialize the NIC
3882 * @oct: octeon device
3884 * This initialization routine is called once the Octeon device application is
3887 static int liquidio_init_nic_module(struct octeon_device *oct)
3890 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3892 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
/* only default iq and oq were initialized
 * initialize the rest as well
 */
3897 /* run port_config command for each port */
3898 oct->ifcount = num_nic_ports;
3900 memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3902 for (i = 0; i < MAX_OCTEON_LINKS; i++)
3903 oct->props[i].gmxport = -1;
3905 retval = setup_nic_devices(oct);
3907 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3908 goto octnet_init_failure;
/* Call vf_rep_modinit if the firmware is switchdev capable
 * and do it from the first liquidio function probed.
 */
3914 if (!oct->octeon_id &&
3915 oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3916 retval = lio_vf_rep_modinit();
3918 liquidio_stop_nic_module(oct);
3919 goto octnet_init_failure;
3923 liquidio_ptp_init(oct);
3925 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3929 octnet_init_failure:
3937 * nic_starter - finish init
* @work: work struct
*
* Starter callback that invokes the remaining initialization work after
* the NIC is up and running.
3942 static void nic_starter(struct work_struct *work)
3944 struct octeon_device *oct;
3945 struct cavium_wk *wk = (struct cavium_wk *)work;
3947 oct = (struct octeon_device *)wk->ctxptr;
3949 if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
/* If the status of the device is CORE_OK, the core
 * application has reported its application type. Call
 * any registered handlers now and move to the RUNNING
 * state.
 */
3957 if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3958 schedule_delayed_work(&oct->nic_poll_work.work,
3959 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3963 atomic_set(&oct->status, OCT_DEV_RUNNING);
3965 if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3966 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3968 if (liquidio_init_nic_module(oct))
3969 dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3971 handshake[oct->octeon_id].started_ok = 1;
3973 dev_err(&oct->pci_dev->dev,
3974 "Unexpected application running on NIC (%d). Check firmware.\n",
3978 complete(&handshake[oct->octeon_id].started);
3982 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
3984 struct octeon_device *oct = (struct octeon_device *)buf;
3985 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3986 int i, notice, vf_idx;
3990 notice = recv_pkt->rh.r.ossp;
3991 data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
3993 /* the first 64-bit word of data is the vf_num */
3995 octeon_swap_8B_data(&vf_num, 1);
3996 vf_idx = (int)vf_num - 1;
3998 cores_crashed = READ_ONCE(oct->cores_crashed);
4000 if (notice == VF_DRV_LOADED) {
4001 if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
4002 oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
4003 dev_info(&oct->pci_dev->dev,
4004 "driver for VF%d was loaded\n", vf_idx);
4006 try_module_get(THIS_MODULE);
4008 } else if (notice == VF_DRV_REMOVED) {
4009 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4010 oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4011 dev_info(&oct->pci_dev->dev,
4012 "driver for VF%d was removed\n", vf_idx);
4014 module_put(THIS_MODULE);
4016 } else if (notice == VF_DRV_MACADDR_CHANGED) {
4017 u8 *b = (u8 *)&data[1];
4019 oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4020 dev_info(&oct->pci_dev->dev,
4021 "VF driver changed VF%d's MAC address to %pM\n",
4025 for (i = 0; i < recv_pkt->buffer_count; i++)
4026 recv_buffer_free(recv_pkt->buffer_ptr[i]);
4027 octeon_free_recv_info(recv_info);
4033 * octeon_device_init - Device initialization for each Octeon device that is probed
4034 * @octeon_dev: octeon device
4036 static int octeon_device_init(struct octeon_device *octeon_dev)
4039 char bootcmd[] = "\n";
4040 char *dbg_enb = NULL;
4041 enum lio_fw_state fw_state;
4042 struct octeon_device_priv *oct_priv =
4043 (struct octeon_device_priv *)octeon_dev->priv;
4044 atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
/* Enable access to the octeon device and make its DMA capability
 * known to the OS.
 */
4049 if (octeon_pci_os_setup(octeon_dev))
4052 atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4054 /* Identify the Octeon type and map the BAR address space. */
4055 if (octeon_chip_specific_setup(octeon_dev)) {
4056 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4060 atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
/* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
 * since that is what is required for the reference to be removed
 * during de-initialization (see 'octeon_destroy_resources').
 */
4066 octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4067 PCI_SLOT(octeon_dev->pci_dev->devfn),
4068 PCI_FUNC(octeon_dev->pci_dev->devfn),
4071 octeon_dev->app_mode = CVM_DRV_INVALID_APP;
/* CN23XX supports preloaded firmware if the following is true:
 *
 * The adapter indicates that firmware is currently running AND
 * 'fw_type' is 'auto'.
 *
 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
 */
4080 if (OCTEON_CN23XX_PF(octeon_dev) &&
4081 cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4082 atomic_cmpxchg(octeon_dev->adapter_fw_state,
4083 FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4086 /* If loading firmware, only first device of adapter needs to do so. */
4087 fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4088 FW_NEEDS_TO_BE_LOADED,
4089 FW_IS_BEING_LOADED);
/* Here, [local variable] 'fw_state' is set to one of:
 *
 * FW_IS_PRELOADED: No firmware is to be loaded (see above)
 * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
 * firmware to the adapter.
 * FW_IS_BEING_LOADED: The driver's second instance will not load
 * firmware to the adapter.
 */
/* Prior to f/w load, perform a soft reset of the Octeon device;
 * if the reset fails, return with an error.
 */
4103 if (fw_state == FW_NEEDS_TO_BE_LOADED)
4104 if (octeon_dev->fn_list.soft_reset(octeon_dev))
/* Initialize the dispatch mechanism used to push packets arriving on
 * Octeon Output queues.
 */
4110 if (octeon_init_dispatch_list(octeon_dev))
4113 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4114 OPCODE_NIC_CORE_DRV_ACTIVE,
4115 octeon_core_drv_init,
4118 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4119 OPCODE_NIC_VF_DRV_NOTICE,
4120 octeon_recv_vf_drv_notice, octeon_dev);
4121 INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4122 octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4123 schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4124 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4126 atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4128 if (octeon_set_io_queues_off(octeon_dev)) {
4129 dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4133 if (OCTEON_CN23XX_PF(octeon_dev)) {
4134 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4136 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
/* Initialize soft command buffer pool */
4143 if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4144 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4147 atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4149 /* Setup the data structures that manage this Octeon's Input queues. */
4150 if (octeon_setup_instr_queues(octeon_dev)) {
4151 dev_err(&octeon_dev->pci_dev->dev,
4152 "instruction queue initialization failed\n");
4155 atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
/* Initialize lists to manage the requests of different types that
 * arrive from user & kernel applications for this octeon device.
 */
4160 if (octeon_setup_response_list(octeon_dev)) {
4161 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4164 atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4166 if (octeon_setup_output_queues(octeon_dev)) {
4167 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4171 atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4173 if (OCTEON_CN23XX_PF(octeon_dev)) {
4174 if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4175 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4178 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4180 if (octeon_allocate_ioq_vector
4182 octeon_dev->sriov_info.num_pf_rings)) {
4183 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4186 atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
/* The input and output queue registers were set up earlier (the
 * queues were not enabled). Any additional registers
 * that need to be programmed should be done now.
 */
4193 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4195 dev_err(&octeon_dev->pci_dev->dev,
4196 "Failed to configure device registers\n");
/* Initialize the tasklet that handles output queue packet processing. */
4202 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4203 tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh);
/* Setup the interrupt handler and record the INT SUM register address
 */
4207 if (octeon_setup_interrupt(octeon_dev,
4208 octeon_dev->sriov_info.num_pf_rings))
4211 /* Enable Octeon device interrupts */
4212 octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4214 atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
/* Send Credit for Octeon Output queues. Credits are always sent BEFORE
 * the output queue is enabled.
 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
 * before any credits have been issued, causing the ring to be reset
 * (and the f/w to appear never to have started).
 */
4224 for (j = 0; j < octeon_dev->num_oqs; j++)
4225 writel(octeon_dev->droq[j]->max_count,
4226 octeon_dev->droq[j]->pkts_credit_reg);
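/* i.e. each output ring starts out fully credited: for a ring with a
 * max_count of 2048 descriptors (value illustrative) this writes 2048
 * to pkts_credit_reg, telling the hardware that every receive buffer
 * in the ring is available to it.
 */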
4228 /* Enable the input and output queues for this Octeon device */
4229 ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4231 dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
4235 atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4237 if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4238 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4240 dev_info(&octeon_dev->pci_dev->dev,
4241 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4244 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
/* Wait for the octeon to initialize DDR after the soft-reset. */
4247 while (!ddr_timeout) {
4248 set_current_state(TASK_INTERRUPTIBLE);
4249 if (schedule_timeout(HZ / 10)) {
4250 /* user probably pressed Control-C */
4254 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4256 dev_err(&octeon_dev->pci_dev->dev,
4257 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4262 if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4263 dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
/* Divert U-Boot to take commands from the host instead. */
4268 ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4270 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4271 ret = octeon_init_consoles(octeon_dev);
4273 dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
/* If console debug is enabled, specify an empty string to use the
 * default enablement, ELSE specify a NULL string for 'disabled'.
 */
4279 dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4280 ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4282 dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4284 } else if (octeon_console_debug_enabled(0)) {
/* If the console was added AND we're logging console output,
 * then set our console print function.
 */
4288 octeon_dev->console[0].print = octeon_dbg_console_print;
4291 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4293 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4294 ret = load_firmware(octeon_dev);
4296 dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4300 atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4303 handshake[octeon_dev->octeon_id].init_ok = 1;
4304 complete(&handshake[octeon_dev->octeon_id].init);
4306 atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4307 oct_priv->dev = octeon_dev;
4313 * octeon_dbg_console_print - Debug console print function
4314 * @oct: octeon device
4315 * @console_num: console number
4316 * @prefix: first portion of line to display
4317 * @suffix: second portion of line to display
4319 * The OCTEON debug console outputs entire lines (excluding '\n').
4320 * Normally, the line will be passed in the 'prefix' parameter.
4321 * However, due to buffering, it is possible for a line to be split into two
4322 * parts, in which case they will be passed as the 'prefix' parameter and
4323 * 'suffix' parameter.
4325 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4326 char *prefix, char *suffix)
4328 if (prefix && suffix)
4329 dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4332 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4334 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4340 * liquidio_exit - Exits the module
4342 static void __exit liquidio_exit(void)
4344 liquidio_deinit_pci();
4346 pr_info("LiquidIO network module is now unloaded\n");
4349 module_init(liquidio_init);
4350 module_exit(liquidio_exit);