/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
#include "lio_vf_rep.h"
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
		 "Bitmask indicating which consoles have debug output redirected to syslog.");

/**
 * octeon_console_debug_enabled - determines if a given console has debug enabled.
 * @console: console to check
 * Return: 1 = enabled. 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
	return (console_bitmask >> (console)) & 0x1;
}
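
/* Bit N of console_bitmask enables console N.  For example, loading the
 * module with
 *
 *   modprobe liquidio console_bitmask=0x3
 *
 * redirects debug output from Octeon consoles 0 and 1 to syslog.
 */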

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS		1000

/* Update localtime to octeon firmware every 60 seconds so that the
 * firmware uses the same time reference as the host; that makes it easy
 * to correlate firmware-logged events/errors with host events when
 * debugging.
 */
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000

/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST	msecs_to_jiffies(1000)

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};

/* Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
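
/* Note: the largest GSO super-packet advertised to the stack reserves
 * OCTNIC_GSO_MAX_HEADER_SIZE bytes for headers within the device's input
 * jabber limit, so header + payload never exceeds what an input queue
 * will accept.
 */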

struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

static void octeon_droq_bh(struct tasklet_struct *t)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t,
							   droq_tasklet);
	struct octeon_device *oct = oct_priv->dev;

	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
		lio_enable_irq(oct->droq[q_no], NULL);

		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
			/* set time and cnt interrupt thresholds for this DROQ
			 * for NAPI
			 */
			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
			    0x5700000040ULL);
			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
		}
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}

/**
 * force_io_queues_off - Forces all IO queues off on a given device
 * @oct: Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}

/**
 * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
 * @oct: Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}

/**
 * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
 * @dev: Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
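
/* Note: PCI_ERR_UNCOR_STATUS is write-1-to-clear, and PCI_ERR_UNCOR_SEVER
 * flags which uncorrectable errors are treated as fatal.  So in the
 * recoverable (pci_channel_io_normal) case only the bits *not* marked
 * fatal are written back (and thus cleared); otherwise only the fatal
 * bits are cleared.
 */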

/**
 * stop_pci_io - Stop all PCI IO to a given device
 * @oct: Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts  */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * liquidio_pcie_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery but only
	 * for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * liquidio_pcie_mmio_enabled - mmio handler
 * @pdev: Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev)
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * liquidio_pcie_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev)
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * liquidio_pcie_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev)
{
	/* Nothing to be done here. */
}

#define liquidio_suspend NULL
#define liquidio_resume NULL

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled	= liquidio_pcie_mmio_enabled,
	.slot_reset	= liquidio_pcie_slot_reset,
	.resume		= liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
	{       /* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume);

static struct pci_driver liquidio_pci_driver = {
	.name		= "LiquidIO",
	.id_table	= liquidio_pci_tbl,
	.probe		= liquidio_probe,
	.remove		= liquidio_remove,
	.err_handler	= &liquidio_err_handler,    /* For AER */
	.driver.pm	= &liquidio_pm_ops,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * liquidio_init_pci - register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * liquidio_deinit_pci - unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * check_txq_status - Check Tx queue status, and take appropriate action
 * @lio: per-network private data
 * Return: 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
	int numqs = lio->netdev->real_num_tx_queues;
	int ret_val = 0;
	int q, iq;

	/* check each sub-queue state */
	for (q = 0; q < numqs; q++) {
		iq = lio->linfo.txpciq[q %
			lio->oct_dev->num_iqs].s.q_no;
		if (octnet_iq_is_full(lio->oct_dev, iq))
			continue;
		if (__netif_subqueue_stopped(lio->netdev, q)) {
			netif_wake_subqueue(lio->netdev, q);
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
						  tx_restart, 1);
			ret_val++;
		}
	}

	return ret_val;
}

/**
 * print_link_info - Print link information
 * @netdev: network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}

/**
 * octnet_link_status_change - Routine to notify MTU change
 * @work: work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	/* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
	 * this API is invoked only when new max-MTU of the interface is
	 * less than current MTU.
	 */
	rtnl_lock();
	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
	rtnl_unlock();
}

/**
 * setup_link_status_change_wq - Sets up the mtu status change work
 * @netdev: network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}

static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}

/**
 * update_link_status - Update link status
 * @netdev: network device
 * @ls: link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);
	int current_max_mtu = lio->linfo.link.s.mtu;
	struct octeon_device *oct = lio->oct_dev;

	dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
		__func__, lio->linfo.link.u64, ls->u64);
	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
			netif_carrier_on(netdev);
			wake_txqs(netdev);
		} else {
			dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
			netif_carrier_off(netdev);
			stop_txqs(netdev);
		}
		if (lio->linfo.link.s.mtu != current_max_mtu) {
			netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
				   current_max_mtu, lio->linfo.link.s.mtu);
			netdev->max_mtu = lio->linfo.link.s.mtu;
		}
		if (lio->linfo.link.s.mtu < netdev->mtu) {
			dev_warn(&oct->pci_dev->dev,
				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
				 netdev->mtu, lio->linfo.link.s.mtu);
			queue_delayed_work(lio->link_status_wq.wq,
					   &lio->link_status_wq.wk.work, 0);
		}
	}
}

/**
 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
 * firmware can correct its time, in case there is a time skew
 *
 * @work: work scheduled to send time update to octeon firmware
 */
static void lio_sync_octeon_time(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct timespec64 ts;
	struct lio_time *lt;
	int ret;

	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: soft command allocation failed\n");
		return;
	}

	lt = (struct lio_time *)sc->virtdptr;

	/* Get time of the day */
	ktime_get_real_ts64(&ts);
	lt->sec = ts.tv_sec;
	lt->nsec = ts.tv_nsec;
	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
	} else {
		WRITE_ONCE(sc->caller_is_done, true);
	}

	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
}
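
/* Note on soft-command ownership, which this file relies on repeatedly:
 * once octeon_send_soft_command() accepts a command, the response manager
 * owns it.  A caller that does not wait for the response must set
 * sc->caller_is_done so the command can be reclaimed through the done and
 * zombie lists (see octeon_free_sc_done_list()/octeon_free_sc_zombie_list()
 * in the teardown path); on a send failure the caller still owns the
 * command and frees it itself.
 */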

/**
 * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware
 *
 * @netdev: network device which should send time update to firmware
 */
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->sync_octeon_time_wq.wq =
		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
	if (!lio->sync_octeon_time_wq.wq) {
		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
			  lio_sync_octeon_time);
	lio->sync_octeon_time_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

	return 0;
}

/**
 * cleanup_sync_octeon_time_wq - destroy wq
 *
 * @netdev: network device which should send time update to firmware
 *
 * Stop scheduling and destroy the work created to periodically update local
 * time to octeon firmware.
 */
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

	if (time_wq->wq) {
		cancel_delayed_work_sync(&time_wq->wk.work);
		destroy_workqueue(time_wq->wq);
	}
}
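
/* A dual-PF LiquidIO adapter exposes its two PFs as consecutive octeon
 * devices.  The "other" device is found heuristically: it must sit on the
 * same PCI bus and in the same slot, differing only in function number.
 */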
static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
	struct octeon_device *other_oct;

	other_oct = lio_get_device(oct->octeon_id + 1);

	if (other_oct && other_oct->pci_dev) {
		int oct_busnum, other_oct_busnum;

		oct_busnum = oct->pci_dev->bus->number;
		other_oct_busnum = other_oct->pci_dev->bus->number;

		if (oct_busnum == other_oct_busnum) {
			int oct_slot, other_oct_slot;

			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

			if (oct_slot == other_oct_slot)
				return other_oct;
		}
	}

	return NULL;
}

static void disable_all_vf_links(struct octeon_device *oct)
{
	struct net_device *netdev;
	int max_vfs, vf, i;

	if (!oct)
		return;

	max_vfs = oct->sriov_info.max_vfs;

	for (i = 0; i < oct->ifcount; i++) {
		netdev = oct->props[i].netdev;
		if (!netdev)
			continue;

		for (vf = 0; vf < max_vfs; vf++)
			liquidio_set_vf_link_state(netdev, vf,
						   IFLA_VF_LINK_STATE_DISABLE);
	}
}
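
/* Per-NIC watchdog thread.  The firmware reports crashed or stuck cores
 * through the CN23XX_SLI_SCRATCH2 CSR as a bitmask (bit N == core N).
 * When a crash is seen, the watchdog logs it once per core, disables all
 * VF links on both PFs of the adapter, and (when module unloading is
 * enabled) drops the module references held on behalf of loaded VF
 * drivers so the PF driver can still be unloaded for recovery.
 */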
static int liquidio_watchdog(void *param)
{
	bool err_msg_was_printed[LIO_MAX_CORES];
	u16 mask_of_crashed_or_stuck_cores = 0;
	bool all_vf_links_are_disabled = false;
	struct octeon_device *oct = param;
	struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
	long refcount, vfs_referencing_pf;
	u64 vfs_mask1, vfs_mask2;
#endif
	int core;

	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

	while (!kthread_should_stop()) {
		/* sleep for a couple of seconds so that we don't hog the CPU */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(2000));

		mask_of_crashed_or_stuck_cores =
		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

		if (!mask_of_crashed_or_stuck_cores)
			continue;

		WRITE_ONCE(oct->cores_crashed, true);
		other_oct = get_other_octeon_device(oct);
		if (other_oct)
			WRITE_ONCE(other_oct->cores_crashed, true);

		for (core = 0; core < LIO_MAX_CORES; core++) {
			bool core_crashed_or_got_stuck;

			core_crashed_or_got_stuck =
			    (mask_of_crashed_or_stuck_cores
			     >> core) & 1;

			if (core_crashed_or_got_stuck &&
			    !err_msg_was_printed[core]) {
				dev_err(&oct->pci_dev->dev,
					"ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
					core);
				err_msg_was_printed[core] = true;
			}
		}

		if (all_vf_links_are_disabled)
			continue;

		disable_all_vf_links(oct);
		disable_all_vf_links(other_oct);
		all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
		vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);

		vfs_referencing_pf  = hweight64(vfs_mask1);
		vfs_referencing_pf += hweight64(vfs_mask2);

		refcount = module_refcount(THIS_MODULE);
		if (refcount >= vfs_referencing_pf) {
			while (vfs_referencing_pf) {
				module_put(THIS_MODULE);
				vfs_referencing_pf--;
			}
		}
#endif
	}

	return 0;
}

/**
 * liquidio_probe - PCI probe handler
 * @pdev: PCI device structure
 * @ent: unused
 */
static int
liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent)
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	/* Enable PTP for 6XXX Device */
	if (((pdev->device == OCTEON_CN66XX) ||
	     (pdev->device == OCTEON_CN68XX)))
		oct_dev->ptp_enable = true;
	else
		oct_dev->ptp_enable = false;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	oct_dev->subsystem_id = pdev->subsystem_vendor |
		(pdev->subsystem_device << 16);

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		complete(&hs->init);
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	if (OCTEON_CN23XX_PF(oct_dev)) {
		u8 bus, device, function;

		if (atomic_read(oct_dev->adapter_refcount) == 1) {
			/* Each NIC gets one watchdog kernel thread.  The first
			 * PF (of each NIC) that gets pci_driver->probe()'d
			 * creates that thread.
			 */
			bus = pdev->bus->number;
			device = PCI_SLOT(pdev->devfn);
			function = PCI_FUNC(pdev->devfn);
			oct_dev->watchdog_task = kthread_run(liquidio_watchdog,
							     oct_dev,
							     "liowd/%02hhx:%02hhx.%hhx",
							     bus, device, function);
			if (IS_ERR(oct_dev->watchdog_task)) {
				oct_dev->watchdog_task = NULL;
				dev_err(&oct_dev->pci_dev->dev,
					"failed to create kernel_thread\n");
				liquidio_remove(pdev);
				return -1;
			}
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}

static bool fw_type_is_auto(void)
{
	return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
		       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}

/**
 * octeon_pci_flr - PCI FLR for each Octeon device.
 * @oct: octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	int rc;

	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	rc = __pci_reset_function_locked(oct->pci_dev);

	if (rc != 0)
		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
			rc, oct->pf_num);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}
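
/* Teardown walks the device-state machine from the current state all the
 * way down: each case in the switch below undoes one init stage and then
 * falls through to the next-lower stage, so a partially initialized
 * device is cleaned up correctly no matter where init stopped.
 */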
/**
 * octeon_destroy_resources - Destroy resources associated with octeon device
 * @oct: octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i, refcount;
	struct msix_entry *msix_entries;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	struct handshake *hs;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		fallthrough;
	case OCT_DEV_HOST_OK:

	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

		fallthrough;
	case OCT_DEV_IO_QUEUES_DONE:
		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* Force all requests waiting to be fetched by OCTEON to
		 * complete.
		 */
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			struct octeon_instr_queue *iq;

			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			iq = oct->instr_queue[i];

			if (atomic_read(&iq->instr_pending)) {
				spin_lock_bh(&iq->lock);
				iq->fill_cnt = 0;
				iq->octeon_read_index = iq->host_write_index;
				iq->stats.instr_processed +=
					atomic_read(&iq->instr_pending);
				lio_process_iq_request_list(oct, iq, 0);
				spin_unlock_bh(&iq->lock);
			}
		}

		lio_process_ordered_list(oct, 1);
		octeon_free_sc_done_list(oct);
		octeon_free_sc_zombie_list(oct);

		fallthrough;
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts  */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				if (oct->ioq_vector[i].vector) {
					/* clear the affinity_cpumask */
					irq_set_affinity_hint(
							msix_entries[i].vector,
							NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
					oct->ioq_vector[i].vector = 0;
				}
			}
			/* non-iov vector's argument is oct struct */
			free_irq(msix_entries[i].vector, oct);

			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
		} else {
			/* Release the interrupt line */
			free_irq(oct->pci_dev->irq, oct);

			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
		}

		kfree(oct->irq_name_storage);
		oct->irq_name_storage = NULL;

		fallthrough;
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		if (OCTEON_CN23XX_PF(oct))
			octeon_free_ioq_vector(oct);

		fallthrough;
	case OCT_DEV_MBOX_SETUP_DONE:
		if (OCTEON_CN23XX_PF(oct))
			oct->fn_list.free_mbox(oct);

		fallthrough;
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/* Wait for any pending operations */
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		fallthrough;
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		fallthrough;
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}
#ifdef CONFIG_PCI_IOV
		if (oct->sriov_info.sriov_enabled)
			pci_disable_sriov(oct->pci_dev);
#endif
		fallthrough;
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		fallthrough;
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		fallthrough;
	case OCT_DEV_PCI_MAP_DONE:
		refcount = octeon_deregister_device(oct);

		/* Soft reset the octeon device before exiting.
		 * However, if fw was loaded from card (i.e. autoboot),
		 * perform an FLR instead.
		 * Implementation note: only soft-reset the device
		 * if it is a CN6XXX OR the LAST CN23XX device.
		 */
		if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
			octeon_pci_flr(oct);
		else if (OCTEON_CN6XXX(oct) || !refcount)
			oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		fallthrough;
	case OCT_DEV_PCI_ENABLE_DONE:
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		fallthrough;
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}                       /* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}

/**
 * send_rx_ctrl_cmd - Send Rx control command
 * @lio: per-network private data
 * @start_stop: whether to start or stop
 */
static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int retval;

	if (oct->props[lio->ifidx].rx_on == start_stop)
		return 0;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
					  16, 0);
	if (!sc) {
		netif_info(lio, rx_err, lio->netdev,
			   "Failed to allocate octeon_soft_command struct\n");
		return -ENOMEM;
	}

	ncmd = (union octnet_cmd *)sc->virtdptr;

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
		octeon_free_soft_command(oct, sc);
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return retval;

		oct->props[lio->ifidx].rx_on = start_stop;
		WRITE_ONCE(sc->caller_is_done, true);
	}

	return retval;
}

/**
 * liquidio_destroy_nic_device - Destroy NIC device interface
 * @oct: octeon device
 * @ifidx: which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct napi_struct *napi, *n;
	struct lio *lio;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	tasklet_enable(&oct_priv->droq_tasklet);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	cleanup_sync_octeon_time_wq(netdev);
	cleanup_link_status_change_wq(netdev);

	cleanup_rx_oom_poll_fn(netdev);

	lio_delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}

/**
 * liquidio_stop_nic_module - Stop complete NIC functionality
 * @oct: octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
	device_lock(&oct->pci_dev->dev);
	if (oct->devlink) {
		devlink_unregister(oct->devlink);
		devlink_free(oct->devlink);
		oct->devlink = NULL;
	}
	device_unlock(&oct->pci_dev->dev);

	if (!oct->ifcount) {
		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
		return 1;
	}

	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	lio_vf_rep_destroy(oct);

	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
		for (j = 0; j < oct->num_oqs; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}

	for (i = 0; i < oct->ifcount; i++)
		liquidio_destroy_nic_device(oct, i);

	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
	return 0;
}

/**
 * liquidio_remove - Cleans up resources at unload time
 * @pdev: PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	if (oct_dev->watchdog_task)
		kthread_stop(oct_dev->watchdog_task);

	if (!oct_dev->octeon_id &&
	    oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
		lio_vf_rep_modexit();

	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
		liquidio_stop_nic_module(oct_dev);

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}

/**
 * octeon_chip_specific_setup - Identify the Octeon device and to map the BAR address space
 * @oct: octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
	u32 dev_id, rev_id;
	int ret = 1;

	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	switch (dev_id) {
	case OCTEON_CN68XX_PCIID:
		oct->chip_id = OCTEON_CN68XX;
		ret = lio_setup_cn68xx_octeon_device(oct);
		break;

	case OCTEON_CN66XX_PCIID:
		oct->chip_id = OCTEON_CN66XX;
		ret = lio_setup_cn66xx_octeon_device(oct);
		break;

	case OCTEON_CN23XX_PCIID_PF:
		oct->chip_id = OCTEON_CN23XX_PF_VID;
		ret = setup_cn23xx_octeon_pf_device(oct);
		if (ret)
			break;
#ifdef CONFIG_PCI_IOV
		if (!ret)
			pci_sriov_set_totalvfs(oct->pci_dev,
					       oct->sriov_info.max_vfs);
#endif
		break;

	default:
		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
			dev_id);
	}

	return ret;
}

/**
 * octeon_pci_os_setup - PCI initialization for each Octeon device.
 * @oct: octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
	/* setup PCI stuff first */
	if (pci_enable_device(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
		return 1;
	}

	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
		pci_disable_device(oct->pci_dev);
		return 1;
	}

	/* Enable PCI DMA Master. */
	pci_set_master(oct->pci_dev);

	return 0;
}

/**
 * free_netbuf - Unmap and free network buffer
 * @buf: buffer
 */
static void free_netbuf(void *buf)
{
	struct sk_buff *skb;
	struct octnet_buf_free_info *finfo;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
			 DMA_TO_DEVICE);

	tx_buffer_free(skb);
}
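
/* Note for the gather-buffer variants below: each octeon scatter/gather
 * entry holds four pointer slots, so fragment i lives at
 * g->sg[i >> 2].ptr[i & 3].  Slot 0 of entry 0 maps the linear part of
 * the skb; the remaining slots map the page fragments.
 */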

/**
 * free_netsgbuf - Unmap and free gather buffer
 * @buf: buffer
 */
static void free_netsgbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		dma_unmap_page(&lio->oct_dev->pci_dev->dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       skb_frag_size(frag), DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);
	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	tx_buffer_free(skb);
}

/**
 * free_netsgbuf_with_resp - Unmap and free gather buffer with response
 * @buf: buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octeon_soft_command *sc;
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		dma_unmap_page(&lio->oct_dev->pci_dev->dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       skb_frag_size(frag), DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */
}

/**
 * liquidio_ptp_adjfine - Adjust ptp frequency
 * @ptp: PTP clock info
 * @scaled_ppm: how much to adjust by, in scaled parts-per-million
 *
 * Scaled parts per million is ppm with a 16-bit binary fractional field.
 */
static int liquidio_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 comp, delta;
	unsigned long flags;
	bool neg_adj = false;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* The hardware adds the clock compensation value to the
	 * PTP clock on every coprocessor clock cycle, so we
	 * compute the delta in terms of coprocessor clocks.
	 */
	delta = (u64)ppb << 32;
	do_div(delta, oct->coproc_clock_rate);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
	if (neg_adj)
		comp -= delta;
	else
		comp += delta;
	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * liquidio_ptp_adjtime - Adjust ptp time
 * @ptp: PTP clock info
 * @delta: how much to adjust by, in nanosecs
 */
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio->ptp_adjust += delta;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * liquidio_ptp_gettime - Get hardware clock time, including any adjustment
 * @ptp: PTP clock info
 * @ts: timespec
 */
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
				struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_irqsave(&lio->ptp_lock, flags);
	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
	ns += lio->ptp_adjust;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * liquidio_ptp_settime - Set hardware clock time. Reset adjustment
 * @ptp: PTP clock info
 * @ts: timespec
 */
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	ns = timespec64_to_ns(ts);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
	lio->ptp_adjust = 0;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * liquidio_ptp_enable - Check if PTP is enabled
 * @ptp: PTP clock info
 * @rq: request
 * @on: is it on
 */
static int
liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp,
		    struct ptp_clock_request __maybe_unused *rq,
		    int __maybe_unused on)
{
	return -EOPNOTSUPP;
}

/**
 * oct_ptp_open - Open PTP clock source
 * @netdev: network device
 */
static void oct_ptp_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_init(&lio->ptp_lock);

	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
	lio->ptp_info.owner = THIS_MODULE;
	lio->ptp_info.max_adj = 250000000;
	lio->ptp_info.n_alarm = 0;
	lio->ptp_info.n_ext_ts = 0;
	lio->ptp_info.n_per_out = 0;
	lio->ptp_info.pps = 0;
	lio->ptp_info.adjfine = liquidio_ptp_adjfine;
	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
	lio->ptp_info.settime64 = liquidio_ptp_settime;
	lio->ptp_info.enable = liquidio_ptp_enable;

	lio->ptp_adjust = 0;

	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
					    &oct->pci_dev->dev);

	if (IS_ERR(lio->ptp_clock))
		lio->ptp_clock = NULL;
}

/**
 * liquidio_ptp_init - Init PTP clock
 * @oct: octeon device
 */
static void liquidio_ptp_init(struct octeon_device *oct)
{
	u64 clock_comp, cfg;

	clock_comp = (u64)NSEC_PER_SEC << 32;
	do_div(clock_comp, oct->coproc_clock_rate);
	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);

	/* Enable */
	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
}
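
/* The CLOCK_COMP value is effectively a 32.32 fixed-point number of
 * nanoseconds added to the PTP clock per coprocessor clock cycle.  For
 * example, with a 1 GHz coprocessor clock the nominal value computed
 * above is (10^9 << 32) / 10^9 = 2^32, i.e. exactly 1.0 ns per cycle,
 * and a +100 ppb adjfine request adds (100 << 32) / 10^9 ~= 429 to it.
 */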

/**
 * load_firmware - Load firmware to device
 * @oct: octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int load_firmware(struct octeon_device *oct)
{
	int ret = 0;
	const struct firmware *fw;
	char fw_name[LIO_MAX_FW_FILENAME_LEN];
	char *tmp_fw_type;

	if (fw_type_is_auto()) {
		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
		strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
	} else {
		tmp_fw_type = fw_type;
	}

	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
		octeon_get_conf(oct)->card_name, tmp_fw_type,
		LIO_FW_NAME_SUFFIX);

	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
			fw_name);
		release_firmware(fw);
		return ret;
	}

	ret = octeon_download_firmware(oct, fw->data, fw->size);

	release_firmware(fw);

	return ret;
}

/**
 * octnet_poll_check_txq_status - Poll routine for checking transmit queue status
 * @work: work_struct data structure
 */
static void octnet_poll_check_txq_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
		return;

	check_txq_status(lio);
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}

/**
 * setup_tx_poll_fn - Sets up the txq poll check
 * @netdev: network device
 */
static inline int setup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
						WQ_MEM_RECLAIM, 0);
	if (!lio->txq_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
			  octnet_poll_check_txq_status);
	lio->txq_status_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
	return 0;
}

static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->txq_status_wq.wq) {
		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
		destroy_workqueue(lio->txq_status_wq.wq);
	}
}

/**
 * liquidio_open - Net device open for LiquidIO
 * @netdev: network device
 */
static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct napi_struct *napi, *n;
	int ret = 0;

	if (oct->props[lio->ifidx].napi_enabled == 0) {
		tasklet_disable(&oct_priv->droq_tasklet);

		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_enable(napi);

		oct->props[lio->ifidx].napi_enabled = 1;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 1;
	}

	if (oct->ptp_enable)
		oct_ptp_open(netdev);

	ifstate_set(lio, LIO_IFSTATE_RUNNING);

	if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) {
		ret = setup_tx_poll_fn(netdev);
		if (ret)
			goto err_poll;
	}

	netif_tx_start_all_queues(netdev);

	/* Ready for link status updates */
	lio->intf_open = 1;

	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");

	/* tell Octeon to start forwarding packets to host */
	ret = send_rx_ctrl_cmd(lio, 1);
	if (ret)
		goto err_rx_ctrl;

	/* start periodical statistics fetch */
	INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
	lio->stats_wk.ctxptr = lio;
	schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
					(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));

	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
		 netdev->name);

	return 0;

err_rx_ctrl:
	if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)
		cleanup_tx_poll_fn(netdev);
err_poll:
	if (lio->ptp_clock) {
		ptp_clock_unregister(lio->ptp_clock);
		lio->ptp_clock = NULL;
	}

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	return ret;
}

/**
 * liquidio_stop - Net device stop for LiquidIO
 * @netdev: network device
 */
static int liquidio_stop(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct napi_struct *napi, *n;
	int ret = 0;

	ifstate_reset(lio, LIO_IFSTATE_RUNNING);

	/* Stop any link updates */
	lio->intf_open = 0;

	stop_txqs(netdev);

	/* Inform that netif carrier is down */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	lio->linfo.link.s.link_up = 0;
	lio->link_changes++;

	/* Tell Octeon that nic interface is down. */
	ret = send_rx_ctrl_cmd(lio, 0);
	if (ret)
		return ret;

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			cleanup_tx_poll_fn(netdev);
	} else {
		cleanup_tx_poll_fn(netdev);
	}

	cancel_delayed_work_sync(&lio->stats_wk.work);

	if (lio->ptp_clock) {
		ptp_clock_unregister(lio->ptp_clock);
		lio->ptp_clock = NULL;
	}

	/* Wait for any pending Rx descriptors */
	if (lio_wait_for_clean_oq(oct))
		netif_info(lio, rx_err, lio->netdev,
			   "Proceeding with stop interface after partial RX desc processing\n");

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;

		tasklet_enable(&oct_priv->droq_tasklet);
	}

	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

	return ret;
}

/**
 * get_new_flags - Converts a mask based on net device flags
 * @netdev: network device
 *
 * This routine generates an octnet_ifflags mask from the net device flags
 * received from the OS.
 */
static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
{
	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;

	if (netdev->flags & IFF_PROMISC)
		f |= OCTNET_IFFLAG_PROMISC;

	if (netdev->flags & IFF_ALLMULTI)
		f |= OCTNET_IFFLAG_ALLMULTI;

	if (netdev->flags & IFF_MULTICAST) {
		f |= OCTNET_IFFLAG_MULTICAST;

		/* Accept all multicast addresses if there are more than we
		 * can handle
		 */
		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
			f |= OCTNET_IFFLAG_ALLMULTI;
	}

	if (netdev->flags & IFF_BROADCAST)
		f |= OCTNET_IFFLAG_BROADCAST;

	return f;
}

/**
 * liquidio_set_mcast_list - Net device set_multicast_list
 * @netdev: network device
 */
static void liquidio_set_mcast_list(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct netdev_hw_addr *ha;
	u64 *mc;
	int ret;
	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
	nctrl.ncmd.s.param1 = get_new_flags(netdev);
	nctrl.ncmd.s.param2 = mc_count;
	nctrl.ncmd.s.more = mc_count;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mc = &nctrl.udd[0];
	netdev_for_each_mc_addr(ha, netdev) {
		*mc = 0;
		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
		/* no need to swap bytes */

		if (++mc > &nctrl.udd[mc_count])
			break;
	}

	/* Apparently, any activity in this call from the kernel has to
	 * be atomic. So we won't wait for response.
	 */

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
			ret);
	}
}

/**
 * liquidio_set_mac - Net device set_mac_address
 * @netdev: network device
 * @p: pointer to sockaddr
 */
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
	int ret = 0;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct sockaddr *addr = (struct sockaddr *)p;
	struct octnic_ctrl_pkt nctrl;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
		return -ENOMEM;
	}

	if (nctrl.sc_status) {
		dev_err(&oct->pci_dev->dev,
			"%s: MAC Address change failed. sc return=%x\n",
			__func__, nctrl.sc_status);
		return -EIO;
	}

	eth_hw_addr_set(netdev, addr->sa_data);
	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);

	return 0;
}

static void
liquidio_get_stats64(struct net_device *netdev,
		     struct rtnl_link_stats64 *lstats)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct;
	u64 pkts = 0, drop = 0, bytes = 0;
	struct oct_droq_stats *oq_stats;
	struct oct_iq_stats *iq_stats;
	int i, iq_no, oq_no;

	oct = lio->oct_dev;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	for (i = 0; i < oct->num_iqs; i++) {
		iq_no = lio->linfo.txpciq[i].s.q_no;
		iq_stats = &oct->instr_queue[iq_no]->stats;
		pkts += iq_stats->tx_done;
		drop += iq_stats->tx_dropped;
		bytes += iq_stats->tx_tot_bytes;
	}

	lstats->tx_packets = pkts;
	lstats->tx_bytes = bytes;
	lstats->tx_dropped = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < oct->num_oqs; i++) {
		oq_no = lio->linfo.rxpciq[i].s.q_no;
		oq_stats = &oct->droq[oq_no]->stats;
		pkts += oq_stats->rx_pkts_received;
		drop += (oq_stats->rx_dropped +
			 oq_stats->dropped_nodispatch +
			 oq_stats->dropped_toomany +
			 oq_stats->dropped_nomem);
		bytes += oq_stats->rx_bytes_received;
	}

	lstats->rx_bytes = bytes;
	lstats->rx_packets = pkts;
	lstats->rx_dropped = drop;

	lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
	lstats->collisions = oct->link_stats.fromhost.total_collisions;

	/* detailed rx_errors: */
	lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
	/* received packets with CRC errors */
	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
	/* received frame alignment errors */
	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
	/* receiver FIFO overruns */
	lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;

	lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
			    lstats->rx_frame_errors + lstats->rx_fifo_errors;

	/* detailed tx_errors */
	lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
	lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
	lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;

	lstats->tx_errors = lstats->tx_aborted_errors +
			    lstats->tx_carrier_errors +
			    lstats->tx_fifo_errors;
}

/**
 * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
 * @netdev: network device
 * @ifr: interface request
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config conf;
	struct lio *lio = GET_LIO(netdev);

	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
		return -EFAULT;

	switch (conf.tx_type) {
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	switch (conf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		conf.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
	else
		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
}
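
/* Note: the device timestamps either every received packet or none, so
 * any PTP-specific rx_filter request is coerced to HWTSTAMP_FILTER_ALL
 * above and the effective setting is reported back to user space.
 */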

/**
 * liquidio_ioctl - ioctl handler
 * @netdev: network device
 * @ifr: interface request
 * @cmd: command
 */
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct lio *lio = GET_LIO(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		if (lio->oct_dev->ptp_enable)
			return hwtstamp_ioctl(netdev, ifr);
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * handle_timestamp - handle a Tx timestamp response
 * @oct: octeon device
 * @status: response status
 * @buf: pointer to skb
 */
static void handle_timestamp(struct octeon_device *oct,
			     u32 status,
			     void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct octeon_soft_command *sc;
	struct oct_timestamp_resp *resp;
	struct lio *lio;
	struct sk_buff *skb = (struct sk_buff *)buf;

	finfo = (struct octnet_buf_free_info *)skb->cb;
	lio = finfo->lio;
	sc = finfo->sc;
	oct = lio->oct_dev;
	resp = (struct oct_timestamp_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_DONE) {
		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(skb);
}
2239 * send_nic_timestamp_pkt - Send a data packet that will be timestamped
2240 * @oct: octeon device
2241 * @ndata: pointer to network data
2242 * @finfo: pointer to private network data
2243 * @xmit_more: more is coming
2245 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2246 struct octnic_data_pkt *ndata,
2247 struct octnet_buf_free_info *finfo,
2251 struct octeon_soft_command *sc;
2258 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2259 sizeof(struct oct_timestamp_resp));
2263 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2264 return IQ_SEND_FAILED;
2267 if (ndata->reqtype == REQTYPE_NORESP_NET)
2268 ndata->reqtype = REQTYPE_RESP_NET;
2269 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2270 ndata->reqtype = REQTYPE_RESP_NET_SG;
2272 sc->callback = handle_timestamp;
2273 sc->callback_arg = finfo->skb;
2274 sc->iq_no = ndata->q_no;
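/* The instruction-header layout differs by chip family: CN23XX PF uses
 * the cmd3/ih3 format, older CN66XX/CN68XX parts use cmd2/ih2.
 */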
2276 if (OCTEON_CN23XX_PF(oct))
2277 len = (u32)((struct octeon_instr_ih3 *)
2278 (&sc->cmd.cmd3.ih3))->dlengsz;
2280 len = (u32)((struct octeon_instr_ih2 *)
2281 (&sc->cmd.cmd2.ih2))->dlengsz;
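/* Only ring the doorbell when the stack reports no further packets
 * pending; batching doorbell writes reduces MMIO overhead.
 */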
2283 ring_doorbell = !xmit_more;
2285 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2286 sc, len, ndata->reqtype);
2288 if (retval == IQ_SEND_FAILED) {
2289 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2291 octeon_free_soft_command(oct, sc);
2293 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2300 * liquidio_xmit - Transmit network packets to the Octeon interface
2301 * @skb: skbuff struct to be passed to network layer.
2302 * @netdev: pointer to network device
2304 * Return: whether the packet was transmitted to the device okay or not
2305 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
2307 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2310 struct octnet_buf_free_info *finfo;
2311 union octnic_cmd_setup cmdsetup;
2312 struct octnic_data_pkt ndata;
2313 struct octeon_device *oct;
2314 struct oct_iq_stats *stats;
2315 struct octeon_instr_irh *irh;
2316 union tx_info *tx_info;
2318 int q_idx = 0, iq_no = 0;
2319 int j, xmit_more = 0;
2323 lio = GET_LIO(netdev);
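/* Map the queue the stack picked for this skb to the hardware
 * input-queue (IQ) number, then grab that queue's stats.
 */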
2326 q_idx = skb_iq(oct, skb);
2328 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2330 stats = &oct->instr_queue[iq_no]->stats;
2332 /* Check for all conditions in which the current packet cannot be transmitted. */
2335 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2336 (!lio->linfo.link.s.link_up) ||
2338 netif_info(lio, tx_err, lio->netdev,
2339 "Transmit failed link_status : %d\n",
2340 lio->linfo.link.s.link_up);
2341 goto lio_xmit_failed;
2344 /* Use space in skb->cb to store info used to unmap and free the buffers. */
2347 finfo = (struct octnet_buf_free_info *)skb->cb;
2352 /* Prepare the attributes for the data to be passed to OSI. */
2353 memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2355 ndata.buf = (void *)finfo;
2359 if (octnet_iq_is_full(oct, ndata.q_no)) {
2360 /* defer sending if queue is full */
2361 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2363 stats->tx_iq_busy++;
2364 return NETDEV_TX_BUSY;
2367 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
2368 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2371 ndata.datasize = skb->len;
2374 cmdsetup.s.iq_no = iq_no;
2376 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2377 if (skb->encapsulation) {
2378 cmdsetup.s.tnl_csum = 1;
2381 cmdsetup.s.transport_csum = 1;
2384 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2385 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2386 cmdsetup.s.timestamp = 1;
2389 if (skb_shinfo(skb)->nr_frags == 0) {
2390 cmdsetup.s.u.datasize = skb->len;
2391 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2393 /* Offload checksum calculation for TCP/UDP packets */
2394 dptr = dma_map_single(&oct->pci_dev->dev,
2398 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2399 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2401 stats->tx_dmamap_fail++;
2402 return NETDEV_TX_BUSY;
2405 if (OCTEON_CN23XX_PF(oct))
2406 ndata.cmd.cmd3.dptr = dptr;
2408 ndata.cmd.cmd2.dptr = dptr;
2410 ndata.reqtype = REQTYPE_NORESP_NET;
2415 struct octnic_gather *g;
2417 spin_lock(&lio->glist_lock[q_idx]);
2418 g = (struct octnic_gather *)
2419 lio_list_delete_head(&lio->glist[q_idx]);
2420 spin_unlock(&lio->glist_lock[q_idx]);
2423 netif_info(lio, tx_err, lio->netdev,
2424 "Transmit scatter gather: glist null!\n");
2425 goto lio_xmit_failed;
2428 cmdsetup.s.gather = 1;
2429 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2430 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2432 memset(g->sg, 0, g->sg_size);
2434 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2436 (skb->len - skb->data_len),
2438 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2439 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2441 stats->tx_dmamap_fail++;
2442 return NETDEV_TX_BUSY;
2444 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2446 frags = skb_shinfo(skb)->nr_frags;
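/* Each gather-list entry holds four pointer/size slots, so fragment i
 * lands in entry i >> 2, slot i & 3; slot 0 of entry 0 already holds
 * the linear data mapped above.
 */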
2449 frag = &skb_shinfo(skb)->frags[i - 1];
2451 g->sg[(i >> 2)].ptr[(i & 3)] =
2452 skb_frag_dma_map(&oct->pci_dev->dev,
2453 frag, 0, skb_frag_size(frag),
2456 if (dma_mapping_error(&oct->pci_dev->dev,
2457 g->sg[i >> 2].ptr[i & 3])) {
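/* A fragment failed to map: unwind the linear mapping and every
 * fragment mapped so far before reporting NETDEV_TX_BUSY.
 */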
2458 dma_unmap_single(&oct->pci_dev->dev,
2460 skb->len - skb->data_len,
2462 for (j = 1; j < i; j++) {
2463 frag = &skb_shinfo(skb)->frags[j - 1];
2464 dma_unmap_page(&oct->pci_dev->dev,
2465 g->sg[j >> 2].ptr[j & 3],
2466 skb_frag_size(frag),
2469 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2471 return NETDEV_TX_BUSY;
2474 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
2479 dptr = g->sg_dma_ptr;
2481 if (OCTEON_CN23XX_PF(oct))
2482 ndata.cmd.cmd3.dptr = dptr;
2484 ndata.cmd.cmd2.dptr = dptr;
2488 ndata.reqtype = REQTYPE_NORESP_NET_SG;
2491 if (OCTEON_CN23XX_PF(oct)) {
2492 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2493 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2495 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2496 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2499 if (skb_shinfo(skb)->gso_size) {
2500 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2501 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2505 /* HW insert VLAN tag */
2506 if (skb_vlan_tag_present(skb)) {
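/* Split the 16-bit VLAN TCI: bits 15:13 carry the PCP priority,
 * bits 11:0 the VLAN ID.
 */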
2507 irh->priority = skb_vlan_tag_get(skb) >> 13;
2508 irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
2511 xmit_more = netdev_xmit_more();
2513 if (unlikely(cmdsetup.s.timestamp))
2514 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2516 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2517 if (status == IQ_SEND_FAILED)
2518 goto lio_xmit_failed;
2520 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2522 if (status == IQ_SEND_STOP)
2523 netif_stop_subqueue(netdev, q_idx);
2525 netif_trans_update(netdev);
2527 if (tx_info->s.gso_segs)
2528 stats->tx_done += tx_info->s.gso_segs;
2531 stats->tx_tot_bytes += ndata.datasize;
2533 return NETDEV_TX_OK;
2536 stats->tx_dropped++;
2537 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2538 iq_no, stats->tx_dropped);
2540 dma_unmap_single(&oct->pci_dev->dev, dptr,
2541 ndata.datasize, DMA_TO_DEVICE);
2543 octeon_ring_doorbell_locked(oct, iq_no);
2545 tx_buffer_free(skb);
2546 return NETDEV_TX_OK;
2550 * liquidio_tx_timeout - Network device Tx timeout
2551 * @netdev: pointer to network device
2552 * @txqueue: index of the hung transmit queue
2554 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
2558 lio = GET_LIO(netdev);
2560 netif_info(lio, tx_err, lio->netdev,
2561 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2562 netdev->stats.tx_dropped);
2563 netif_trans_update(netdev);
2567 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2568 __be16 proto __attribute__((unused)),
2571 struct lio *lio = GET_LIO(netdev);
2572 struct octeon_device *oct = lio->oct_dev;
2573 struct octnic_ctrl_pkt nctrl;
2576 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2579 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2580 nctrl.ncmd.s.param1 = vid;
2581 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2582 nctrl.netpndev = (u64)netdev;
2583 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2585 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2587 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2596 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2597 __be16 proto __attribute__((unused)),
2600 struct lio *lio = GET_LIO(netdev);
2601 struct octeon_device *oct = lio->oct_dev;
2602 struct octnic_ctrl_pkt nctrl;
2605 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2608 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2609 nctrl.ncmd.s.param1 = vid;
2610 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2611 nctrl.netpndev = (u64)netdev;
2612 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2614 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2616 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2625 * liquidio_set_rxcsum_command - Send a command to enable/disable RX checksum offload
2626 * @netdev: pointer to network device
2627 * @command: OCTNET_CMD_TNL_RX_CSUM_CTL
2628 * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE
2629 * Return: SUCCESS or FAILURE
2631 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2634 struct lio *lio = GET_LIO(netdev);
2635 struct octeon_device *oct = lio->oct_dev;
2636 struct octnic_ctrl_pkt nctrl;
2639 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2642 nctrl.ncmd.s.cmd = command;
2643 nctrl.ncmd.s.param1 = rx_cmd;
2644 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2645 nctrl.netpndev = (u64)netdev;
2646 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2648 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2650 dev_err(&oct->pci_dev->dev,
2651 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2660 * liquidio_vxlan_port_command - Send a command to the firmware to add/delete a VxLAN UDP port
2661 * @netdev: pointer to network device
2662 * @command: OCTNET_CMD_VXLAN_PORT_CONFIG
2663 * @vxlan_port: VxLAN port to be added or deleted
2664 * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD,
2665 * OCTNET_CMD_VXLAN_PORT_DEL
2666 * Return: SUCCESS or FAILURE
2668 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2669 u16 vxlan_port, u8 vxlan_cmd_bit)
2671 struct lio *lio = GET_LIO(netdev);
2672 struct octeon_device *oct = lio->oct_dev;
2673 struct octnic_ctrl_pkt nctrl;
2676 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
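/* The add/delete selector rides in the command's 'more' field; the UDP
 * port number itself goes in param1.
 */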
2679 nctrl.ncmd.s.cmd = command;
2680 nctrl.ncmd.s.more = vxlan_cmd_bit;
2681 nctrl.ncmd.s.param1 = vxlan_port;
2682 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2683 nctrl.netpndev = (u64)netdev;
2684 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2686 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2688 dev_err(&oct->pci_dev->dev,
2689 "VxLAN port add/delete failed in core (ret:0x%x)\n",
2697 static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
2698 unsigned int table, unsigned int entry,
2699 struct udp_tunnel_info *ti)
2701 return liquidio_vxlan_port_command(netdev,
2702 OCTNET_CMD_VXLAN_PORT_CONFIG,
2704 OCTNET_CMD_VXLAN_PORT_ADD);
2707 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
2710 struct udp_tunnel_info *ti)
2712 return liquidio_vxlan_port_command(netdev,
2713 OCTNET_CMD_VXLAN_PORT_CONFIG,
2715 OCTNET_CMD_VXLAN_PORT_DEL);
2718 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
2719 .set_port = liquidio_udp_tunnel_set_port,
2720 .unset_port = liquidio_udp_tunnel_unset_port,
2722 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
2727 * liquidio_fix_features - Net device fix features
2728 * @netdev: pointer to network device
2729 * @request: features requested
2730 * Return: updated features list
2732 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2733 netdev_features_t request)
2735 struct lio *lio = netdev_priv(netdev);
2737 if ((request & NETIF_F_RXCSUM) &&
2738 !(lio->dev_capability & NETIF_F_RXCSUM))
2739 request &= ~NETIF_F_RXCSUM;
2741 if ((request & NETIF_F_HW_CSUM) &&
2742 !(lio->dev_capability & NETIF_F_HW_CSUM))
2743 request &= ~NETIF_F_HW_CSUM;
2745 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2746 request &= ~NETIF_F_TSO;
2748 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2749 request &= ~NETIF_F_TSO6;
2751 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2752 request &= ~NETIF_F_LRO;
2754 /* Disable LRO if RXCSUM is off */
2755 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2756 (lio->dev_capability & NETIF_F_LRO))
2757 request &= ~NETIF_F_LRO;
2759 if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2760 !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2761 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2767 * liquidio_set_features - Net device set features
2768 * @netdev: pointer to network device
2769 * @features: features to enable/disable
2771 static int liquidio_set_features(struct net_device *netdev,
2772 netdev_features_t features)
2774 struct lio *lio = netdev_priv(netdev);
2776 if ((features & NETIF_F_LRO) &&
2777 (lio->dev_capability & NETIF_F_LRO) &&
2778 !(netdev->features & NETIF_F_LRO))
2779 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2780 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2781 else if (!(features & NETIF_F_LRO) &&
2782 (lio->dev_capability & NETIF_F_LRO) &&
2783 (netdev->features & NETIF_F_LRO))
2784 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2785 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2787 /* Send a command to the firmware to enable/disable RX checksum
2788 * offload settings as requested through ethtool. */
2790 if (!(netdev->features & NETIF_F_RXCSUM) &&
2791 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2792 (features & NETIF_F_RXCSUM))
2793 liquidio_set_rxcsum_command(netdev,
2794 OCTNET_CMD_TNL_RX_CSUM_CTL,
2795 OCTNET_CMD_RXCSUM_ENABLE);
2796 else if ((netdev->features & NETIF_F_RXCSUM) &&
2797 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2798 !(features & NETIF_F_RXCSUM))
2799 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2800 OCTNET_CMD_RXCSUM_DISABLE);
2802 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2803 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2804 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2805 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2806 OCTNET_CMD_VLAN_FILTER_ENABLE);
2807 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2808 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2809 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
2810 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
2811 OCTNET_CMD_VLAN_FILTER_DISABLE);
2816 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
2817 u8 *mac, bool is_admin_assigned)
2819 struct lio *lio = GET_LIO(netdev);
2820 struct octeon_device *oct = lio->oct_dev;
2821 struct octnic_ctrl_pkt nctrl;
2824 if (!is_valid_ether_addr(mac))
2827 if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
2830 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2833 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2834 /* vfidx is 0 based, but vf_num (param1) is 1 based */
2835 nctrl.ncmd.s.param1 = vfidx + 1;
2836 nctrl.ncmd.s.more = 1;
2837 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2838 nctrl.netpndev = (u64)netdev;
2839 if (is_admin_assigned) {
2840 nctrl.ncmd.s.param2 = true;
2841 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2845 /* The MAC Address is presented in network byte order. */
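/* The two-byte offset skips the pad bytes of the 64-bit udd word so
 * the six MAC bytes fill its remainder.
 */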
2846 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
2848 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
2850 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2857 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
2859 struct lio *lio = GET_LIO(netdev);
2860 struct octeon_device *oct = lio->oct_dev;
2863 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2866 retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
2868 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
2873 static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx,
2876 struct lio *lio = GET_LIO(netdev);
2877 struct octeon_device *oct = lio->oct_dev;
2878 struct octnic_ctrl_pkt nctrl;
2881 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) {
2882 netif_info(lio, drv, lio->netdev,
2883 "firmware does not support spoofchk\n");
2887 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
2888 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
2893 if (oct->sriov_info.vf_spoofchk[vfidx])
2897 if (!oct->sriov_info.vf_spoofchk[vfidx])
2901 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2902 nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1;
2903 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
2904 nctrl.ncmd.s.param1 =
2905 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
2908 nctrl.ncmd.s.param2 = enable;
2909 nctrl.ncmd.s.more = 0;
2910 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2913 retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2916 netif_info(lio, drv, lio->netdev,
2917 "Failed to set VF %d spoofchk %s\n", vfidx,
2918 enable ? "on" : "off");
2922 oct->sriov_info.vf_spoofchk[vfidx] = enable;
2923 netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx,
2924 enable ? "on" : "off");
2929 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
2930 u16 vlan, u8 qos, __be16 vlan_proto)
2932 struct lio *lio = GET_LIO(netdev);
2933 struct octeon_device *oct = lio->oct_dev;
2934 struct octnic_ctrl_pkt nctrl;
2938 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2941 if (vlan_proto != htons(ETH_P_8021Q))
2942 return -EPROTONOSUPPORT;
2944 if (vlan >= VLAN_N_VID || qos > 7)
2948 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
2952 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
2955 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2958 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2960 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2962 nctrl.ncmd.s.param1 = vlantci;
2963 nctrl.ncmd.s.param2 =
2964 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
2965 nctrl.ncmd.s.more = 0;
2966 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2969 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
2976 oct->sriov_info.vf_vlantci[vfidx] = vlantci;
2981 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
2982 struct ifla_vf_info *ivi)
2984 struct lio *lio = GET_LIO(netdev);
2985 struct octeon_device *oct = lio->oct_dev;
2988 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
2991 memset(ivi, 0, sizeof(struct ifla_vf_info));
2994 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
2995 ether_addr_copy(&ivi->mac[0], macaddr);
2996 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
2997 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
2998 if (oct->sriov_info.trusted_vf.active &&
2999 oct->sriov_info.trusted_vf.id == vfidx)
3000 ivi->trusted = true;
3002 ivi->trusted = false;
3003 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
3004 ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx];
3005 ivi->max_tx_rate = lio->linfo.link.s.speed;
3006 ivi->min_tx_rate = 0;
3011 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
3013 struct octeon_device *oct = lio->oct_dev;
3014 struct octeon_soft_command *sc;
3017 sc = octeon_alloc_soft_command(oct, 0, 16, 0);
3021 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
3023 /* vfidx is 0 based, but vf_num (param1) is 1 based */
3024 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
3025 OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
3028 init_completion(&sc->complete);
3029 sc->sc_status = OCTEON_REQUEST_PENDING;
3031 retval = octeon_send_soft_command(oct, sc);
3032 if (retval == IQ_SEND_FAILED) {
3033 octeon_free_soft_command(oct, sc);
3036 /* Wait for response or timeout */
3037 retval = wait_for_sc_completion_timeout(oct, sc, 0);
3041 WRITE_ONCE(sc->caller_is_done, true);
3047 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
3050 struct lio *lio = GET_LIO(netdev);
3051 struct octeon_device *oct = lio->oct_dev;
3053 if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
3054 /* trusted vf is not supported by firmware older than 1.7.1 */
3058 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
3059 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
3066 if (oct->sriov_info.trusted_vf.active &&
3067 oct->sriov_info.trusted_vf.id == vfidx)
3070 if (oct->sriov_info.trusted_vf.active) {
3071 netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
3077 if (!oct->sriov_info.trusted_vf.active)
3081 if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
3083 oct->sriov_info.trusted_vf.id = vfidx;
3084 oct->sriov_info.trusted_vf.active = true;
3086 oct->sriov_info.trusted_vf.active = false;
3089 netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
3090 setting ? "" : "not ");
3092 netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
3099 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3102 struct lio *lio = GET_LIO(netdev);
3103 struct octeon_device *oct = lio->oct_dev;
3104 struct octnic_ctrl_pkt nctrl;
3107 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3110 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3113 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3114 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3115 nctrl.ncmd.s.param1 =
3116 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3117 nctrl.ncmd.s.param2 = linkstate;
3118 nctrl.ncmd.s.more = 0;
3119 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3122 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
3125 oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3133 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3135 struct lio_devlink_priv *priv;
3136 struct octeon_device *oct;
3138 priv = devlink_priv(devlink);
3141 *mode = oct->eswitch_mode;
3147 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
3148 struct netlink_ext_ack *extack)
3150 struct lio_devlink_priv *priv;
3151 struct octeon_device *oct;
3154 priv = devlink_priv(devlink);
3157 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3160 if (oct->eswitch_mode == mode)
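/* Note the ordering below: VF representors are created only after the
 * switchdev mode is recorded, and destroyed before reverting to legacy.
 */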
3164 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3165 oct->eswitch_mode = mode;
3166 ret = lio_vf_rep_create(oct);
3169 case DEVLINK_ESWITCH_MODE_LEGACY:
3170 lio_vf_rep_destroy(oct);
3171 oct->eswitch_mode = mode;
3181 static const struct devlink_ops liquidio_devlink_ops = {
3182 .eswitch_mode_get = liquidio_eswitch_mode_get,
3183 .eswitch_mode_set = liquidio_eswitch_mode_set,
3187 liquidio_get_port_parent_id(struct net_device *dev,
3188 struct netdev_phys_item_id *ppid)
3190 struct lio *lio = GET_LIO(dev);
3191 struct octeon_device *oct = lio->oct_dev;
3193 if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3196 ppid->id_len = ETH_ALEN;
3197 ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
3202 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
3203 struct ifla_vf_stats *vf_stats)
3205 struct lio *lio = GET_LIO(netdev);
3206 struct octeon_device *oct = lio->oct_dev;
3207 struct oct_vf_stats stats;
3210 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3213 memset(&stats, 0, sizeof(struct oct_vf_stats));
3214 ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
3216 vf_stats->rx_packets = stats.rx_packets;
3217 vf_stats->tx_packets = stats.tx_packets;
3218 vf_stats->rx_bytes = stats.rx_bytes;
3219 vf_stats->tx_bytes = stats.tx_bytes;
3220 vf_stats->broadcast = stats.broadcast;
3221 vf_stats->multicast = stats.multicast;
3227 static const struct net_device_ops lionetdevops = {
3228 .ndo_open = liquidio_open,
3229 .ndo_stop = liquidio_stop,
3230 .ndo_start_xmit = liquidio_xmit,
3231 .ndo_get_stats64 = liquidio_get_stats64,
3232 .ndo_set_mac_address = liquidio_set_mac,
3233 .ndo_set_rx_mode = liquidio_set_mcast_list,
3234 .ndo_tx_timeout = liquidio_tx_timeout,
3236 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
3237 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
3238 .ndo_change_mtu = liquidio_change_mtu,
3239 .ndo_eth_ioctl = liquidio_ioctl,
3240 .ndo_fix_features = liquidio_fix_features,
3241 .ndo_set_features = liquidio_set_features,
3242 .ndo_set_vf_mac = liquidio_set_vf_mac,
3243 .ndo_set_vf_vlan = liquidio_set_vf_vlan,
3244 .ndo_get_vf_config = liquidio_get_vf_config,
3245 .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk,
3246 .ndo_set_vf_trust = liquidio_set_vf_trust,
3247 .ndo_set_vf_link_state = liquidio_set_vf_link_state,
3248 .ndo_get_vf_stats = liquidio_get_vf_stats,
3249 .ndo_get_port_parent_id = liquidio_get_port_parent_id,
3253 * liquidio_init - Entry point for the liquidio module
3255 static int __init liquidio_init(void)
3258 struct handshake *hs;
3260 init_completion(&first_stage);
3262 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3264 if (liquidio_init_pci())
3267 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3269 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3272 wait_for_completion(&hs->init);
3274 /* init handshake failed */
3275 dev_err(&hs->pci_dev->dev,
3276 "Failed to init device\n");
3277 liquidio_deinit_pci();
3283 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3286 wait_for_completion_timeout(&hs->started,
3287 msecs_to_jiffies(30000));
3288 if (!hs->started_ok) {
3289 /* starter handshake failed */
3290 dev_err(&hs->pci_dev->dev,
3291 "Firmware failed to start\n");
3292 liquidio_deinit_pci();
3301 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3303 struct octeon_device *oct = (struct octeon_device *)buf;
3304 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3306 union oct_link_status *ls;
3309 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3310 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3311 recv_pkt->buffer_size[0],
3312 recv_pkt->rh.r_nic_info.gmxport);
3316 gmxport = recv_pkt->rh.r_nic_info.gmxport;
3317 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3318 OCT_DROQ_INFO_SIZE);
3320 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3321 for (i = 0; i < oct->ifcount; i++) {
3322 if (oct->props[i].gmxport == gmxport) {
3323 update_link_status(oct->props[i].netdev, ls);
3329 for (i = 0; i < recv_pkt->buffer_count; i++)
3330 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3331 octeon_free_recv_info(recv_info);
3336 * setup_nic_devices - Setup network interfaces
3337 * @octeon_dev: octeon device
3339 * Called during init time for each device. It assumes the NIC
3340 * is already up and running. The link information for each
3341 * interface is passed in link_info.
3343 static int setup_nic_devices(struct octeon_device *octeon_dev)
3345 struct lio *lio = NULL;
3346 struct net_device *netdev;
3347 u8 mac[6], i, j, *fw_ver, *micro_ver;
3348 unsigned long micro;
3350 struct octeon_soft_command *sc;
3351 struct liquidio_if_cfg_resp *resp;
3352 struct octdev_props *props;
3353 int retval, num_iqueues, num_oqueues;
3354 int max_num_queues = 0;
3355 union oct_nic_if_cfg if_cfg;
3356 unsigned int base_queue;
3357 unsigned int gmx_port_id;
3358 u32 resp_size, data_size;
3360 struct lio_version *vdata;
3361 struct devlink *devlink;
3362 struct lio_devlink_priv *lio_devlink;
3364 /* This is to handle link status changes */
3365 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3367 lio_nic_info, octeon_dev);
3369 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3370 * They are handled directly.
3372 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3375 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3378 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3379 free_netsgbuf_with_resp);
3381 for (i = 0; i < octeon_dev->ifcount; i++) {
3382 resp_size = sizeof(struct liquidio_if_cfg_resp);
3383 data_size = sizeof(struct lio_version);
3384 sc = (struct octeon_soft_command *)
3385 octeon_alloc_soft_command(octeon_dev, data_size,
3387 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3388 vdata = (struct lio_version *)sc->virtdptr;
3390 *((u64 *)vdata) = 0;
3391 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3392 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3393 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3395 if (OCTEON_CN23XX_PF(octeon_dev)) {
3396 num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3397 num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3398 base_queue = octeon_dev->sriov_info.pf_srn;
3400 gmx_port_id = octeon_dev->pf_num;
3401 ifidx_or_pfnum = octeon_dev->pf_num;
3403 num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3404 octeon_get_conf(octeon_dev), i);
3405 num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3406 octeon_get_conf(octeon_dev), i);
3407 base_queue = CFG_GET_BASE_QUE_NIC_IF(
3408 octeon_get_conf(octeon_dev), i);
3409 gmx_port_id = CFG_GET_GMXID_NIC_IF(
3410 octeon_get_conf(octeon_dev), i);
3414 dev_dbg(&octeon_dev->pci_dev->dev,
3415 "requesting config for interface %d, iqs %d, oqs %d\n",
3416 ifidx_or_pfnum, num_iqueues, num_oqueues);
3419 if_cfg.s.num_iqueues = num_iqueues;
3420 if_cfg.s.num_oqueues = num_oqueues;
3421 if_cfg.s.base_queue = base_queue;
3422 if_cfg.s.gmx_port_id = gmx_port_id;
3426 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3427 OPCODE_NIC_IF_CFG, 0,
3430 init_completion(&sc->complete);
3431 sc->sc_status = OCTEON_REQUEST_PENDING;
3433 retval = octeon_send_soft_command(octeon_dev, sc);
3434 if (retval == IQ_SEND_FAILED) {
3435 dev_err(&octeon_dev->pci_dev->dev,
3436 "iq/oq config failed status: %x\n",
3438 /* Soft instr is freed by driver in case of failure. */
3439 octeon_free_soft_command(octeon_dev, sc);
3443 /* Sleep on a wait queue till the cond flag indicates that the
3444 * response arrived or timed-out.
3446 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
3450 retval = resp->status;
3452 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3453 WRITE_ONCE(sc->caller_is_done, true);
3454 goto setup_nic_dev_done;
3456 snprintf(octeon_dev->fw_info.liquidio_firmware_version,
3458 resp->cfg_info.liquidio_firmware_version);
3460 /* Verify f/w version (in case of 'auto' loading from flash) */
3461 fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3462 if (memcmp(LIQUIDIO_BASE_VERSION,
3464 strlen(LIQUIDIO_BASE_VERSION))) {
3465 dev_err(&octeon_dev->pci_dev->dev,
3466 "Unmatched firmware version. Expected %s.x, got %s.\n",
3467 LIQUIDIO_BASE_VERSION, fw_ver);
3468 WRITE_ONCE(sc->caller_is_done, true);
3469 goto setup_nic_dev_done;
3470 } else if (atomic_read(octeon_dev->adapter_fw_state) ==
3472 dev_info(&octeon_dev->pci_dev->dev,
3473 "Using auto-loaded firmware version %s.\n",
3477 /* extract micro version field; point past '<maj>.<min>.' */
3478 micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
3479 if (kstrtoul(micro_ver, 10, &micro) != 0)
3481 octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
3482 octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
3483 octeon_dev->fw_info.ver.rev = micro;
3485 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3486 (sizeof(struct liquidio_if_cfg_info)) >> 3);
3488 num_iqueues = hweight64(resp->cfg_info.iqmask);
3489 num_oqueues = hweight64(resp->cfg_info.oqmask);
3491 if (!(num_iqueues) || !(num_oqueues)) {
3492 dev_err(&octeon_dev->pci_dev->dev,
3493 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3494 resp->cfg_info.iqmask,
3495 resp->cfg_info.oqmask);
3496 WRITE_ONCE(sc->caller_is_done, true);
3497 goto setup_nic_dev_done;
3500 if (OCTEON_CN6XXX(octeon_dev)) {
3501 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3503 } else if (OCTEON_CN23XX_PF(octeon_dev)) {
3504 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3508 dev_dbg(&octeon_dev->pci_dev->dev,
3509 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3510 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3511 num_iqueues, num_oqueues, max_num_queues);
3512 netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3515 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3516 WRITE_ONCE(sc->caller_is_done, true);
3517 goto setup_nic_dev_done;
3520 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3522 /* Associate the routines that will handle different
3525 netdev->netdev_ops = &lionetdevops;
3527 retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3529 dev_err(&octeon_dev->pci_dev->dev,
3530 "setting real number rx failed\n");
3531 WRITE_ONCE(sc->caller_is_done, true);
3532 goto setup_nic_dev_free;
3535 retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3537 dev_err(&octeon_dev->pci_dev->dev,
3538 "setting real number tx failed\n");
3539 WRITE_ONCE(sc->caller_is_done, true);
3540 goto setup_nic_dev_free;
3543 lio = GET_LIO(netdev);
3545 memset(lio, 0, sizeof(struct lio));
3547 lio->ifidx = ifidx_or_pfnum;
3549 props = &octeon_dev->props[i];
3550 props->gmxport = resp->cfg_info.linfo.gmxport;
3551 props->netdev = netdev;
3553 lio->linfo.num_rxpciq = num_oqueues;
3554 lio->linfo.num_txpciq = num_iqueues;
3555 for (j = 0; j < num_oqueues; j++) {
3556 lio->linfo.rxpciq[j].u64 =
3557 resp->cfg_info.linfo.rxpciq[j].u64;
3559 for (j = 0; j < num_iqueues; j++) {
3560 lio->linfo.txpciq[j].u64 =
3561 resp->cfg_info.linfo.txpciq[j].u64;
3563 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3564 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3565 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3567 WRITE_ONCE(sc->caller_is_done, true);
3569 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3571 if (OCTEON_CN23XX_PF(octeon_dev) ||
3572 OCTEON_CN6XXX(octeon_dev)) {
3573 lio->dev_capability = NETIF_F_HIGHDMA
3576 | NETIF_F_SG | NETIF_F_RXCSUM
3578 | NETIF_F_TSO | NETIF_F_TSO6
3581 netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3583 /* Copy of transmit encapsulation capabilities:
3584 * TSO, TSO6, Checksums for this device
3586 lio->enc_dev_capability = NETIF_F_IP_CSUM
3588 | NETIF_F_GSO_UDP_TUNNEL
3589 | NETIF_F_HW_CSUM | NETIF_F_SG
3591 | NETIF_F_TSO | NETIF_F_TSO6
3594 netdev->hw_enc_features = (lio->enc_dev_capability &
3597 netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
3599 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3601 netdev->vlan_features = lio->dev_capability;
3602 /* Add any unchangeable hw features */
3603 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
3604 NETIF_F_HW_VLAN_CTAG_RX |
3605 NETIF_F_HW_VLAN_CTAG_TX;
3607 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3609 netdev->hw_features = lio->dev_capability;
3610 /* HW_VLAN_RX and HW_VLAN_FILTER are always on */
3611 netdev->hw_features = netdev->hw_features &
3612 ~NETIF_F_HW_VLAN_CTAG_RX;
3614 /* MTU range: 68 - 16000 */
3615 netdev->min_mtu = LIO_MIN_MTU_SIZE;
3616 netdev->max_mtu = LIO_MAX_MTU_SIZE;
3618 /* Point to the properties for octeon device to which this
3619 * interface belongs.
3621 lio->oct_dev = octeon_dev;
3622 lio->octprops = props;
3623 lio->netdev = netdev;
3625 dev_dbg(&octeon_dev->pci_dev->dev,
3626 "if%d gmx: %d hw_addr: 0x%llx\n", i,
3627 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3629 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
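/* Seed each VF with a random MAC; is_admin_assigned stays false until
 * an administrator explicitly sets one.
 */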
3632 eth_random_addr(vfmac);
3633 if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
3634 dev_err(&octeon_dev->pci_dev->dev,
3635 "Error setting VF%d MAC address\n",
3637 goto setup_nic_dev_free;
3641 /* 64-bit swap required on LE machines */
3642 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3643 for (j = 0; j < 6; j++)
3644 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3646 /* Copy MAC Address to OS network device structure */
3648 eth_hw_addr_set(netdev, mac);
3650 /* By default all interfaces on a single Octeon use the same tx and rx queues. */
3653 lio->txq = lio->linfo.txpciq[0].s.q_no;
3654 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3655 if (liquidio_setup_io_queues(octeon_dev, i,
3656 lio->linfo.num_txpciq,
3657 lio->linfo.num_rxpciq)) {
3658 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3659 goto setup_nic_dev_free;
3662 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3664 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3665 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3667 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3668 dev_err(&octeon_dev->pci_dev->dev,
3669 "Gather list allocation failed\n");
3670 goto setup_nic_dev_free;
3673 /* Register ethtool support */
3674 liquidio_set_ethtool_ops(netdev);
3675 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3676 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3678 octeon_dev->priv_flags = 0x0;
3680 if (netdev->features & NETIF_F_LRO)
3681 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3682 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3684 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3685 OCTNET_CMD_VLAN_FILTER_ENABLE);
3687 if ((debug != -1) && (debug & NETIF_MSG_HW))
3688 liquidio_set_feature(netdev,
3689 OCTNET_CMD_VERBOSE_ENABLE, 0);
3691 if (setup_link_status_change_wq(netdev))
3692 goto setup_nic_dev_free;
3694 if ((octeon_dev->fw_info.app_cap_flags &
3695 LIQUIDIO_TIME_SYNC_CAP) &&
3696 setup_sync_octeon_time_wq(netdev))
3697 goto setup_nic_dev_free;
3699 if (setup_rx_oom_poll_fn(netdev))
3700 goto setup_nic_dev_free;
3702 /* Register the network device with the OS */
3703 if (register_netdev(netdev)) {
3704 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3705 goto setup_nic_dev_free;
3708 dev_dbg(&octeon_dev->pci_dev->dev,
3709 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3710 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3711 netif_carrier_off(netdev);
3712 lio->link_changes++;
3714 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3716 /* Send a command to the firmware to enable Rx checksum offload
3717 * by default at setup time of the LiquidIO driver for this device. */
3720 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3721 OCTNET_CMD_RXCSUM_ENABLE);
3722 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3723 OCTNET_CMD_TXCSUM_ENABLE);
3725 dev_dbg(&octeon_dev->pci_dev->dev,
3726 "NIC ifidx:%d Setup successful\n", i);
3728 if (octeon_dev->subsystem_id ==
3729 OCTEON_CN2350_25GB_SUBSYS_ID ||
3730 octeon_dev->subsystem_id ==
3731 OCTEON_CN2360_25GB_SUBSYS_ID) {
3732 cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
3733 octeon_dev->fw_info.ver.min,
3734 octeon_dev->fw_info.ver.rev);
3736 /* speed control unsupported in f/w older than 1.7.2 */
3737 if (cur_ver < OCT_FW_VER(1, 7, 2)) {
3738 dev_info(&octeon_dev->pci_dev->dev,
3739 "speed setting not supported by f/w.");
3740 octeon_dev->speed_setting = 25;
3741 octeon_dev->no_speed_setting = 1;
3743 liquidio_get_speed(lio);
3746 if (octeon_dev->speed_setting == 0) {
3747 octeon_dev->speed_setting = 25;
3748 octeon_dev->no_speed_setting = 1;
3751 octeon_dev->no_speed_setting = 1;
3752 octeon_dev->speed_setting = 10;
3754 octeon_dev->speed_boot = octeon_dev->speed_setting;
3756 /* don't read FEC setting if unsupported by f/w (see above) */
3757 if (octeon_dev->speed_boot == 25 &&
3758 !octeon_dev->no_speed_setting) {
3759 liquidio_get_fec(lio);
3760 octeon_dev->props[lio->ifidx].fec_boot =
3761 octeon_dev->props[lio->ifidx].fec;
3765 device_lock(&octeon_dev->pci_dev->dev);
3766 devlink = devlink_alloc(&liquidio_devlink_ops,
3767 sizeof(struct lio_devlink_priv),
3768 &octeon_dev->pci_dev->dev);
3770 device_unlock(&octeon_dev->pci_dev->dev);
3771 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3772 goto setup_nic_dev_free;
3775 lio_devlink = devlink_priv(devlink);
3776 lio_devlink->oct = octeon_dev;
3778 octeon_dev->devlink = devlink;
3779 octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3780 devlink_register(devlink);
3781 device_unlock(&octeon_dev->pci_dev->dev);
3788 dev_err(&octeon_dev->pci_dev->dev,
3789 "NIC ifidx:%d Setup failed\n", i);
3790 liquidio_destroy_nic_device(octeon_dev, i);
3798 #ifdef CONFIG_PCI_IOV
3799 static int octeon_enable_sriov(struct octeon_device *oct)
3801 unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3802 struct pci_dev *vfdev;
3806 if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3807 err = pci_enable_sriov(oct->pci_dev,
3808 oct->sriov_info.num_vfs_alloced);
3810 dev_err(&oct->pci_dev->dev,
3811 "OCTEON: Failed to enable PCI sriov: %d\n",
3813 oct->sriov_info.num_vfs_alloced = 0;
3816 oct->sriov_info.sriov_enabled = 1;
3818 /* init lookup table that maps DPI ring number to VF pci_dev struct pointer */
3822 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3823 OCTEON_CN23XX_VF_VID, NULL);
3825 if (vfdev->is_virtfn &&
3826 (vfdev->physfn == oct->pci_dev)) {
3827 oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3829 u += oct->sriov_info.rings_per_vf;
3831 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3832 OCTEON_CN23XX_VF_VID, vfdev);
3836 return num_vfs_alloced;
3839 static int lio_pci_sriov_disable(struct octeon_device *oct)
3843 if (pci_vfs_assigned(oct->pci_dev)) {
3844 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3848 pci_disable_sriov(oct->pci_dev);
3851 while (u < MAX_POSSIBLE_VFS) {
3852 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3853 u += oct->sriov_info.rings_per_vf;
3856 oct->sriov_info.num_vfs_alloced = 0;
3857 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3863 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3865 struct octeon_device *oct = pci_get_drvdata(dev);
3868 if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3869 (oct->sriov_info.sriov_enabled)) {
3870 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3871 oct->pf_num, num_vfs);
3876 lio_vf_rep_destroy(oct);
3877 ret = lio_pci_sriov_disable(oct);
3878 } else if (num_vfs > oct->sriov_info.max_vfs) {
3879 dev_err(&oct->pci_dev->dev,
3880 "OCTEON: Max allowed VFs:%d user requested:%d",
3881 oct->sriov_info.max_vfs, num_vfs);
3884 oct->sriov_info.num_vfs_alloced = num_vfs;
3885 ret = octeon_enable_sriov(oct);
3886 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3887 oct->pf_num, num_vfs);
3888 ret = lio_vf_rep_create(oct);
3890 dev_info(&oct->pci_dev->dev,
3891 "vf representor create failed");
3899 * liquidio_init_nic_module - initialize the NIC
3900 * @oct: octeon device
3902 * This initialization routine is called once the Octeon device application is up and running. */
3905 static int liquidio_init_nic_module(struct octeon_device *oct)
3908 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3910 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3912 /* only default iq and oq were initialized
3913 * initialize the rest as well
3915 /* run port_config command for each port */
3916 oct->ifcount = num_nic_ports;
3918 memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3920 for (i = 0; i < MAX_OCTEON_LINKS; i++)
3921 oct->props[i].gmxport = -1;
3923 retval = setup_nic_devices(oct);
3925 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3926 goto octnet_init_failure;
3929 /* Call vf_rep_modinit if the firmware is switchdev capable
3930 * and do it from the first liquidio function probed.
3932 if (!oct->octeon_id &&
3933 oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3934 retval = lio_vf_rep_modinit();
3936 liquidio_stop_nic_module(oct);
3937 goto octnet_init_failure;
3941 liquidio_ptp_init(oct);
3943 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3947 octnet_init_failure:
3955 * nic_starter - finish init
3956 * @work: pointer to the work_struct
3958 * starter callback that invokes the remaining initialization work after the NIC is up and running.
3960 static void nic_starter(struct work_struct *work)
3962 struct octeon_device *oct;
3963 struct cavium_wk *wk = (struct cavium_wk *)work;
3965 oct = (struct octeon_device *)wk->ctxptr;
3967 if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3970 /* If the status of the device is CORE_OK, the core
3971 * application has reported its application type. Call
3972 * any registered handlers now and move to the RUNNING state. */
3975 if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3976 schedule_delayed_work(&oct->nic_poll_work.work,
3977 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3981 atomic_set(&oct->status, OCT_DEV_RUNNING);
3983 if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3984 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3986 if (liquidio_init_nic_module(oct))
3987 dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3989 handshake[oct->octeon_id].started_ok = 1;
3991 dev_err(&oct->pci_dev->dev,
3992 "Unexpected application running on NIC (%d). Check firmware.\n",
3996 complete(&handshake[oct->octeon_id].started);
4000 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
4002 struct octeon_device *oct = (struct octeon_device *)buf;
4003 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
4004 int i, notice, vf_idx;
4008 notice = recv_pkt->rh.r.ossp;
4009 data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
4011 /* the first 64-bit word of data is the vf_num */
4013 octeon_swap_8B_data(&vf_num, 1);
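/* vf_num reported by the firmware is 1-based; convert it to the
 * driver's 0-based VF index.
 */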
4014 vf_idx = (int)vf_num - 1;
4016 cores_crashed = READ_ONCE(oct->cores_crashed);
4018 if (notice == VF_DRV_LOADED) {
4019 if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
4020 oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
4021 dev_info(&oct->pci_dev->dev,
4022 "driver for VF%d was loaded\n", vf_idx);
4024 try_module_get(THIS_MODULE);
4026 } else if (notice == VF_DRV_REMOVED) {
4027 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4028 oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4029 dev_info(&oct->pci_dev->dev,
4030 "driver for VF%d was removed\n", vf_idx);
4032 module_put(THIS_MODULE);
4034 } else if (notice == VF_DRV_MACADDR_CHANGED) {
4035 u8 *b = (u8 *)&data[1];
4037 oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4038 dev_info(&oct->pci_dev->dev,
4039 "VF driver changed VF%d's MAC address to %pM\n",
4043 for (i = 0; i < recv_pkt->buffer_count; i++)
4044 recv_buffer_free(recv_pkt->buffer_ptr[i]);
4045 octeon_free_recv_info(recv_info);
4051 * octeon_device_init - Device initialization for each Octeon device that is probed
4052 * @octeon_dev: octeon device
4054 static int octeon_device_init(struct octeon_device *octeon_dev)
4057 char bootcmd[] = "\n";
4058 char *dbg_enb = NULL;
4059 enum lio_fw_state fw_state;
4060 struct octeon_device_priv *oct_priv =
4061 (struct octeon_device_priv *)octeon_dev->priv;
4062 atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4064 /* Enable access to the octeon device and make its DMA capability known to the OS. */
4067 if (octeon_pci_os_setup(octeon_dev))
4070 atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4072 /* Identify the Octeon type and map the BAR address space. */
4073 if (octeon_chip_specific_setup(octeon_dev)) {
4074 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4078 atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4080 /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4081 * since that is what is required for the reference to be removed
4082 * during de-initialization (see 'octeon_destroy_resources').
4084 octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4085 PCI_SLOT(octeon_dev->pci_dev->devfn),
4086 PCI_FUNC(octeon_dev->pci_dev->devfn),
4089 octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4091 /* CN23XX supports preloaded firmware if the following is true:
4093 * The adapter indicates that firmware is currently running AND
4094 * 'fw_type' is 'auto'.
4096 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4098 if (OCTEON_CN23XX_PF(octeon_dev) &&
4099 cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4100 atomic_cmpxchg(octeon_dev->adapter_fw_state,
4101 FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4104 /* If loading firmware, only first device of adapter needs to do so. */
4105 fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4106 FW_NEEDS_TO_BE_LOADED,
4107 FW_IS_BEING_LOADED);
4109 /* Here, [local variable] 'fw_state' is set to one of:
4111 * FW_IS_PRELOADED: No firmware is to be loaded (see above)
4112 * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4113 * firmware to the adapter.
4114 * FW_IS_BEING_LOADED: The driver's second instance will not load
4115 * firmware to the adapter.
4118 /* Prior to f/w load, perform a soft reset of the Octeon device;
4119 * if error resetting, return w/error.
4121 if (fw_state == FW_NEEDS_TO_BE_LOADED)
4122 if (octeon_dev->fn_list.soft_reset(octeon_dev))
4125 /* Initialize the dispatch mechanism used to push packets arriving on
4126 * Octeon Output queues.
4128 if (octeon_init_dispatch_list(octeon_dev))
4131 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4132 OPCODE_NIC_CORE_DRV_ACTIVE,
4133 octeon_core_drv_init,
4136 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4137 OPCODE_NIC_VF_DRV_NOTICE,
4138 octeon_recv_vf_drv_notice, octeon_dev);
4139 INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4140 octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4141 schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4142 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4144 atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4146 if (octeon_set_io_queues_off(octeon_dev)) {
4147 dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4151 if (OCTEON_CN23XX_PF(octeon_dev)) {
4152 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4154 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4159 /* Initialize soft command buffer pool
4161 if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4162 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4165 atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4167 /* Setup the data structures that manage this Octeon's Input queues. */
4168 if (octeon_setup_instr_queues(octeon_dev)) {
4169 dev_err(&octeon_dev->pci_dev->dev,
4170 "instruction queue initialization failed\n");
4173 atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4175 /* Initialize lists to manage the requests of different types that
4176 * arrive from user & kernel applications for this octeon device.
4178 if (octeon_setup_response_list(octeon_dev)) {
4179 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4182 atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4184 if (octeon_setup_output_queues(octeon_dev)) {
4185 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4189 atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4191 if (OCTEON_CN23XX_PF(octeon_dev)) {
4192 if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4193 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4196 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4198 if (octeon_allocate_ioq_vector
4200 octeon_dev->sriov_info.num_pf_rings)) {
4201 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4204 atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4207 /* The input and output queue registers were set up earlier (the
4208 * queues were not enabled). Any additional registers
4209 * that need to be programmed should be done now. */
4211 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4213 dev_err(&octeon_dev->pci_dev->dev,
4214 "Failed to configure device registers\n");
4219 /* Initialize the tasklet that handles output queue packet processing.*/
4220 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4221 tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh);
4223 /* Setup the interrupt handler and record the INT SUM register address
4225 if (octeon_setup_interrupt(octeon_dev,
4226 octeon_dev->sriov_info.num_pf_rings))
4229 /* Enable Octeon device interrupts */
4230 octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4232 atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4234 /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4235 * the output queue is enabled.
4236 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4237 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4238 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4239 * before any credits have been issued, causing the ring to be reset
4240 * (and the f/w appear to never have started).
4242 for (j = 0; j < octeon_dev->num_oqs; j++)
4243 writel(octeon_dev->droq[j]->max_count,
4244 octeon_dev->droq[j]->pkts_credit_reg);
4246 /* Enable the input and output queues for this Octeon device */
4247 ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4249 dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues\n");
4253 atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4255 if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4256 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4258 dev_info(&octeon_dev->pci_dev->dev,
4259 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4262 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4264 /* Wait for the octeon to initialize DDR after the soft-reset. */
4265 while (!ddr_timeout) {
4266 set_current_state(TASK_INTERRUPTIBLE);
4267 if (schedule_timeout(HZ / 10)) {
4268 /* user probably pressed Control-C */
4272 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4274 dev_err(&octeon_dev->pci_dev->dev,
4275 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4280 if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4281 dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4285 /* Divert uboot to take commands from host instead. */
4286 ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4288 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4289 ret = octeon_init_consoles(octeon_dev);
4291 dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4294 /* If console debug enabled, specify empty string to use default
4295 * enablement ELSE specify NULL string for 'disabled'.
4297 dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4298 ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4300 dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4302 } else if (octeon_console_debug_enabled(0)) {
4303 /* If console was added AND we're logging console output
4304 * then set our console print function.
4306 octeon_dev->console[0].print = octeon_dbg_console_print;
4309 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4311 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4312 ret = load_firmware(octeon_dev);
4314 dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4318 atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4321 handshake[octeon_dev->octeon_id].init_ok = 1;
4322 complete(&handshake[octeon_dev->octeon_id].init);
4324 atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4325 oct_priv->dev = octeon_dev;
4331 * octeon_dbg_console_print - Debug console print function
4332 * @oct: octeon device
4333 * @console_num: console number
4334 * @prefix: first portion of line to display
4335 * @suffix: second portion of line to display
4337 * The OCTEON debug console outputs entire lines (excluding '\n').
4338 * Normally, the line will be passed in the 'prefix' parameter.
4339 * However, due to buffering, it is possible for a line to be split into two
4340 * parts, in which case they will be passed as the 'prefix' parameter and
4341 * 'suffix' parameter.
4343 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4344 char *prefix, char *suffix)
4346 if (prefix && suffix)
4347 dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4350 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4352 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4358 * liquidio_exit - Exits the module
4360 static void __exit liquidio_exit(void)
4362 liquidio_deinit_pci();
4364 pr_info("LiquidIO network module is now unloaded\n");
4367 module_init(liquidio_init);
4368 module_exit(liquidio_exit);