2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38 #include <linux/module.h>
39 #include <linux/moduleparam.h>
40 #include <linux/init.h>
41 #include <linux/pci.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/debugfs.h>
46 #include <linux/ethtool.h>
47 #include <linux/mdio.h>
49 #include "t4vf_common.h"
50 #include "t4vf_defs.h"
52 #include "../cxgb4/t4_regs.h"
53 #include "../cxgb4/t4_msg.h"
56 * Generic information about the driver.
58 #define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
66 * Default ethtool "message level" for adapters.
68 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
69 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
70 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
73 * The driver uses the best interrupt scheme available on a platform in the
74 * order MSI-X then MSI. This parameter determines which of these schemes the
75 * driver may consider as follows:
77 * msi = 2: choose from among MSI-X and MSI
78 * msi = 1: only consider MSI interrupts
80 * Note that unlike the Physical Function driver, this Virtual Function driver
81 * does _not_ support legacy INTx interrupts (this limitation is mandated by
82 * the PCI-E SR-IOV standard).
86 #define MSI_DEFAULT MSI_MSIX
88 static int msi = MSI_DEFAULT;
90 module_param(msi, int, 0644);
91 MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
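/*
 * Illustrative note (not part of the original source): because "msi" is
 * exported via module_param(msi, int, 0644), it can be set at load time,
 * e.g. "modprobe cxgb4vf msi=1" to restrict the driver to plain MSI, or
 * written later through /sys/module/cxgb4vf/parameters/msi; a new value
 * would typically only take effect for adapters probed afterwards.
 */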
94 * Fundamental constants.
95 * ======================
99 MAX_TXQ_ENTRIES = 16384,
100 MAX_RSPQ_ENTRIES = 16384,
101 MAX_RX_BUFFERS = 16384,
103 MIN_TXQ_ENTRIES = 32,
104 MIN_RSPQ_ENTRIES = 128,
108 * For purposes of manipulating the Free List size we need to
109 * recognize that Free Lists are actually Egress Queues (the host
110 * produces free buffers which the hardware consumes), Egress Queue
111 * indices are all in units of Egress Context Units (bytes), and free
112 * list entries are 64-bit PCI DMA addresses. And since the state of
113 * the Producer Index == the Consumer Index implies an EMPTY list, we
114 * always have at least one Egress Unit's worth of Free List entries
115 * unused. See sge.c for more details ...
117 EQ_UNIT = SGE_EQ_IDXSIZE,
118 FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
119 MIN_FL_RESID = FL_PER_EQ_UNIT,
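	/*
	 * Worked example (a sketch, assuming SGE_EQ_IDXSIZE is 64 bytes):
	 * EQ_UNIT = 64, FL_PER_EQ_UNIT = 64 / sizeof(__be64) = 8, so
	 * MIN_FL_RESID = 8.  I.e. one Egress Queue Unit's worth of Free
	 * List entries (8 DMA addresses) is always left unused so that a
	 * completely full list never has Producer Index == Consumer Index.
	 */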
123 * Global driver state.
124 * ====================
127 static struct dentry *cxgb4vf_debugfs_root;
130 * OS "Callback" functions.
131 * ========================
135 * The link status has changed on the indicated "port" (Virtual Interface).
137 void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
139 struct net_device *dev = adapter->port[pidx];
142 * If the port is disabled or the current recorded "link up"
143 * status matches the new status, just return.
145 if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
149 * Tell the OS that the link status has changed and print a short
150 * informative message on the console about the event.
155 const struct port_info *pi = netdev_priv(dev);
157 netif_carrier_on(dev);
159 switch (pi->link_cfg.speed) {
184 switch ((int)pi->link_cfg.fc) {
193 case PAUSE_RX | PAUSE_TX:
202 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
204 netif_carrier_off(dev);
205 netdev_info(dev, "link down\n");
210 * The port module type has changed on the indicated "port" (Virtual
213 void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
215 static const char * const mod_str[] = {
216 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
218 const struct net_device *dev = adapter->port[pidx];
219 const struct port_info *pi = netdev_priv(dev);
221 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
222 dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
224 else if (pi->mod_type < ARRAY_SIZE(mod_str))
225 dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
226 dev->name, mod_str[pi->mod_type]);
227 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
228 dev_info(adapter->pdev_dev, "%s: unsupported optical port "
229 "module inserted\n", dev->name);
230 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
231 dev_info(adapter->pdev_dev, "%s: unknown port module inserted,"
232 "forcing TWINAX\n", dev->name);
233 else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
234 dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
237 dev_info(adapter->pdev_dev, "%s: unknown module type %d "
238 "inserted\n", dev->name, pi->mod_type);
241 static int cxgb4vf_set_addr_hash(struct port_info *pi)
243 struct adapter *adapter = pi->adapter;
246 struct hash_mac_addr *entry;
248 /* Calculate the hash vector for the updated list and program it */
249 list_for_each_entry(entry, &adapter->mac_hlist, list) {
250 ucast |= is_unicast_ether_addr(entry->addr);
251 vec |= (1ULL << hash_mac_addr(entry->addr));
253 return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
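/*
 * Illustrative example (hypothetical values): with two addresses whose
 * hash_mac_addr() results are 5 and 60, the loop above computes
 * vec = (1ULL << 5) | (1ULL << 60), and ucast is true only if at least one
 * of them is a unicast address; t4vf_set_addr_hash() then programs that
 * 64-bit vector into the Virtual Interface's MPS hash filter.
 */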
257 * cxgb4vf_change_mac - Update match filter for a MAC address.
260 * @tcam_idx: TCAM index of existing filter for old value of MAC address,
262 * @addr: the new MAC address value
263 * @persistent: whether a new MAC allocation should be persistent
265 * Modifies an MPS filter and sets it to the new MAC address if
266 * @tcam_idx >= 0, or adds the MAC address to a new filter if
267 * @tcam_idx < 0. In the latter case the address is added persistently
268 * if @persistent is %true.
269 * If the TCAM runs out of entries, the address is programmed into the hash region instead.
272 static int cxgb4vf_change_mac(struct port_info *pi, unsigned int viid,
273 int *tcam_idx, const u8 *addr, bool persistent)
275 struct hash_mac_addr *new_entry, *entry;
276 struct adapter *adapter = pi->adapter;
279 ret = t4vf_change_mac(adapter, viid, *tcam_idx, addr, persistent);
280 /* We ran out of TCAM entries. Try programming the hash region. */
281 if (ret == -ENOMEM) {
282 /* If the MAC address to be updated is in the hash addr
283 * list, update that entry in place
285 list_for_each_entry(entry, &adapter->mac_hlist, list) {
286 if (entry->iface_mac) {
287 ether_addr_copy(entry->addr, addr);
291 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
294 ether_addr_copy(new_entry->addr, addr);
295 new_entry->iface_mac = true;
296 list_add_tail(&new_entry->list, &adapter->mac_hlist);
298 ret = cxgb4vf_set_addr_hash(pi);
299 } else if (ret >= 0) {
308 * Net device operations.
309 * ======================
316 * Perform the MAC and PHY actions needed to enable a "port" (Virtual
319 static int link_start(struct net_device *dev)
322 struct port_info *pi = netdev_priv(dev);
325 * We do not set address filters and promiscuity here; the stack does
326 * that step explicitly. Enable VLAN acceleration.
328 ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
331 ret = cxgb4vf_change_mac(pi, pi->viid,
333 dev->dev_addr, true);
336 * We don't need to actually "start the link" itself since the
337 * firmware will do that for us when the first Virtual Interface
338 * is enabled on a port.
341 ret = t4vf_enable_pi(pi->adapter, pi, true, true);
347 * Name the MSI-X interrupts.
349 static void name_msix_vecs(struct adapter *adapter)
351 int namelen = sizeof(adapter->msix_info[0].desc) - 1;
357 snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
358 "%s-FWeventq", adapter->name);
359 adapter->msix_info[MSIX_FW].desc[namelen] = 0;
364 for_each_port(adapter, pidx) {
365 struct net_device *dev = adapter->port[pidx];
366 const struct port_info *pi = netdev_priv(dev);
369 for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
370 snprintf(adapter->msix_info[msi].desc, namelen,
371 "%s-%d", dev->name, qs);
372 adapter->msix_info[msi].desc[namelen] = 0;
378 * Request all of our MSI-X resources.
380 static int request_msix_queue_irqs(struct adapter *adapter)
382 struct sge *s = &adapter->sge;
388 err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
389 0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
397 for_each_ethrxq(s, rxq) {
398 err = request_irq(adapter->msix_info[msi].vec,
399 t4vf_sge_intr_msix, 0,
400 adapter->msix_info[msi].desc,
401 &s->ethrxq[rxq].rspq);
410 free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
411 free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
416 * Free our MSI-X resources.
418 static void free_msix_queue_irqs(struct adapter *adapter)
420 struct sge *s = &adapter->sge;
423 free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
425 for_each_ethrxq(s, rxq)
426 free_irq(adapter->msix_info[msi++].vec,
427 &s->ethrxq[rxq].rspq);
431 * Turn on NAPI and start up interrupts on a response queue.
433 static void qenable(struct sge_rspq *rspq)
435 napi_enable(&rspq->napi);
438 * 0-increment the Going To Sleep register to start the timer and
441 t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
443 SEINTARM_V(rspq->intr_params) |
444 INGRESSQID_V(rspq->cntxt_id));
448 * Enable NAPI scheduling and interrupt generation for all Receive Queues.
450 static void enable_rx(struct adapter *adapter)
453 struct sge *s = &adapter->sge;
455 for_each_ethrxq(s, rxq)
456 qenable(&s->ethrxq[rxq].rspq);
457 qenable(&s->fw_evtq);
460 * The interrupt queue doesn't use NAPI so we do the 0-increment of
461 * its Going To Sleep register here to get it started.
463 if (adapter->flags & CXGB4VF_USING_MSI)
464 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
466 SEINTARM_V(s->intrq.intr_params) |
467 INGRESSQID_V(s->intrq.cntxt_id));
472 * Wait until all NAPI handlers are descheduled.
474 static void quiesce_rx(struct adapter *adapter)
476 struct sge *s = &adapter->sge;
479 for_each_ethrxq(s, rxq)
480 napi_disable(&s->ethrxq[rxq].rspq.napi);
481 napi_disable(&s->fw_evtq.napi);
485 * Response queue handler for the firmware event queue.
487 static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
488 const struct pkt_gl *gl)
491 * Extract response opcode and get pointer to CPL message body.
493 struct adapter *adapter = rspq->adapter;
494 u8 opcode = ((const struct rss_header *)rsp)->opcode;
495 void *cpl = (void *)(rsp + 1);
500 * We've received an asynchronous message from the firmware.
502 const struct cpl_fw6_msg *fw_msg = cpl;
503 if (fw_msg->type == FW6_TYPE_CMD_RPL)
504 t4vf_handle_fw_rpl(adapter, fw_msg->data);
509 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
511 const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
512 opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
513 if (opcode != CPL_SGE_EGR_UPDATE) {
514 dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
522 case CPL_SGE_EGR_UPDATE: {
524 * We've received an Egress Queue Status Update message. We
525 * get these, if the SGE is configured to send these when the
526 * firmware passes certain points in processing our TX
527 * Ethernet Queue or if we make an explicit request for one.
528 * We use these updates to determine when we may need to
529 * restart a TX Ethernet Queue which was stopped for lack of
530 * free TX Queue Descriptors ...
532 const struct cpl_sge_egr_update *p = cpl;
533 unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
534 struct sge *s = &adapter->sge;
536 struct sge_eth_txq *txq;
540 * Perform sanity checking on the Queue ID to make sure it
541 * really refers to one of our TX Ethernet Egress Queues which
542 * is active and matches the queue's ID. None of these error
543 * conditions should ever happen, so we may want to make them
544 * fatal and/or conditional under DEBUG.
546 eq_idx = EQ_IDX(s, qid);
547 if (unlikely(eq_idx >= MAX_EGRQ)) {
548 dev_err(adapter->pdev_dev,
549 "Egress Update QID %d out of range\n", qid);
552 tq = s->egr_map[eq_idx];
553 if (unlikely(tq == NULL)) {
554 dev_err(adapter->pdev_dev,
555 "Egress Update QID %d TXQ=NULL\n", qid);
558 txq = container_of(tq, struct sge_eth_txq, q);
559 if (unlikely(tq->abs_id != qid)) {
560 dev_err(adapter->pdev_dev,
561 "Egress Update QID %d refers to TXQ %d\n",
567 * Restart a stopped TX Queue which has less than half of its
571 netif_tx_wake_queue(txq->txq);
576 dev_err(adapter->pdev_dev,
577 "unexpected CPL %#x on FW event queue\n", opcode);
584 * Allocate SGE TX/RX response queues. Determine how many sets of SGE queues
585 * to use and initialize them. We support multiple "Queue Sets" per port if
586 * we have MSI-X, otherwise just one queue set per port.
588 static int setup_sge_queues(struct adapter *adapter)
590 struct sge *s = &adapter->sge;
594 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
597 bitmap_zero(s->starving_fl, MAX_EGRQ);
600 * If we're using MSI interrupt mode we need to set up a "forwarded
601 * interrupt" queue which we'll set up with our MSI vector. The rest
602 * of the ingress queues will be set up to forward their interrupts to
603 * this queue ... This must be first since t4vf_sge_alloc_rxq() uses
604 * the intrq's queue ID as the interrupt forwarding queue for the
605 * subsequent calls ...
607 if (adapter->flags & CXGB4VF_USING_MSI) {
608 err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
609 adapter->port[0], 0, NULL, NULL);
611 goto err_free_queues;
615 * Allocate our ingress queue for asynchronous firmware messages.
617 err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
618 MSIX_FW, NULL, fwevtq_handler);
620 goto err_free_queues;
623 * Allocate each "port"'s initial Queue Sets. These can be changed
624 * later on ... up to the point where any interface on the adapter is
625 * brought up, at which point lots of things get nailed down
629 for_each_port(adapter, pidx) {
630 struct net_device *dev = adapter->port[pidx];
631 struct port_info *pi = netdev_priv(dev);
632 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
633 struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
636 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
637 err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
639 &rxq->fl, t4vf_ethrx_handler);
641 goto err_free_queues;
643 err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
644 netdev_get_tx_queue(dev, qs),
645 s->fw_evtq.cntxt_id);
647 goto err_free_queues;
650 memset(&rxq->stats, 0, sizeof(rxq->stats));
655 * Create the reverse mappings for the queues.
657 s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
658 s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
659 IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
660 for_each_port(adapter, pidx) {
661 struct net_device *dev = adapter->port[pidx];
662 struct port_info *pi = netdev_priv(dev);
663 struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
664 struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
667 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
668 IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
669 EQ_MAP(s, txq->q.abs_id) = &txq->q;
672 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
673 * for Free Lists but since all of the Egress Queues
674 * (including Free Lists) have Relative Queue IDs
675 * which are computed as Absolute - Base Queue ID, we
676 * can synthesize the Absolute Queue IDs for the Free
677 * Lists. This is useful for debugging purposes when
678 * we want to dump Queue Contexts via the PF Driver.
680 rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
681 EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
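			/*
			 * Illustrative example (hypothetical IDs): if the
			 * adapter's Egress Queue base (s->egr_base) is 1024
			 * and a Free List was given relative cntxt_id 7, the
			 * synthesized Absolute Queue ID stored in
			 * rxq->fl.abs_id above is 1024 + 7 = 1031, which is
			 * the ID the PF driver's queue-context dump expects.
			 */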
687 t4vf_free_sge_resources(adapter);
692 * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
693 * queues. We configure the RSS CPU lookup table to distribute to the number
694 * of HW receive queues, and the response queue lookup table to narrow that
695 * down to the response queues actually configured for each "port" (Virtual
696 * Interface). We always configure the RSS mapping for all ports since the
697 * mapping table has plenty of entries.
699 static int setup_rss(struct adapter *adapter)
703 for_each_port(adapter, pidx) {
704 struct port_info *pi = adap2pinfo(adapter, pidx);
705 struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
706 u16 rss[MAX_PORT_QSETS];
709 for (qs = 0; qs < pi->nqsets; qs++)
710 rss[qs] = rxq[qs].rspq.abs_id;
712 err = t4vf_config_rss_range(adapter, pi->viid,
713 0, pi->rss_size, rss, pi->nqsets);
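		/*
		 * Illustrative sketch (hypothetical values): with nqsets = 4
		 * and rss_size = 64, the rss[] array above holds the four
		 * Absolute Response Queue IDs and t4vf_config_rss_range()
		 * replicates them cyclically across all 64 slots of the VI's
		 * RSS slice, so hashed ingress traffic is spread evenly over
		 * the port's four Queue Sets.
		 */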
718 * Perform Global RSS Mode-specific initialization.
720 switch (adapter->params.rss.mode) {
721 case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
723 * If Tunnel All Lookup isn't specified in the global
724 * RSS Configuration, then we need to specify a
725 * default Ingress Queue for any ingress packets which
726 * aren't hashed. We'll use our first ingress queue
729 if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
730 union rss_vi_config config;
731 err = t4vf_read_rss_vi_config(adapter,
736 config.basicvirtual.defaultq =
738 err = t4vf_write_rss_vi_config(adapter,
752 * Bring the adapter up. Called whenever we go from no "ports" open to having
753 * one open. This function performs the actions necessary to make an adapter
754 * operational, such as completing the initialization of HW modules, and
755 * enabling interrupts. Must be called with the rtnl lock held. (Note that
756 * this is called "cxgb_up" in the PF Driver.)
758 static int adapter_up(struct adapter *adapter)
763 * If this is the first time we've been called, perform basic
764 * adapter setup. Once we've done this, many of our adapter
765 * parameters can no longer be changed ...
767 if ((adapter->flags & CXGB4VF_FULL_INIT_DONE) == 0) {
768 err = setup_sge_queues(adapter);
771 err = setup_rss(adapter);
773 t4vf_free_sge_resources(adapter);
777 if (adapter->flags & CXGB4VF_USING_MSIX)
778 name_msix_vecs(adapter);
780 adapter->flags |= CXGB4VF_FULL_INIT_DONE;
784 * Acquire our interrupt resources. We only support MSI-X and MSI.
786 BUG_ON((adapter->flags &
787 (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
788 if (adapter->flags & CXGB4VF_USING_MSIX)
789 err = request_msix_queue_irqs(adapter);
791 err = request_irq(adapter->pdev->irq,
792 t4vf_intr_handler(adapter), 0,
793 adapter->name, adapter);
795 dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
801 * Enable NAPI ingress processing and return success.
804 t4vf_sge_start(adapter);
810 * Bring the adapter down. Called whenever the last "port" (Virtual
811 * Interface) is closed. (Note that this routine is called "cxgb_down" in the PF
814 static void adapter_down(struct adapter *adapter)
817 * Free interrupt resources.
819 if (adapter->flags & CXGB4VF_USING_MSIX)
820 free_msix_queue_irqs(adapter);
822 free_irq(adapter->pdev->irq, adapter);
825 * Wait for NAPI handlers to finish.
831 * Start up a net device.
833 static int cxgb4vf_open(struct net_device *dev)
836 struct port_info *pi = netdev_priv(dev);
837 struct adapter *adapter = pi->adapter;
840 * If we don't have a connection to the firmware there's nothing we
843 if (!(adapter->flags & CXGB4VF_FW_OK))
847 * If this is the first interface that we're opening on the "adapter",
848 * bring the "adapter" up now.
850 if (adapter->open_device_map == 0) {
851 err = adapter_up(adapter);
856 /* It's possible that the basic port information could have
857 * changed since we first read it.
859 err = t4vf_update_port_info(pi);
864 * Note that this interface is up and start everything up ...
866 err = link_start(dev);
870 pi->vlan_id = t4vf_get_vf_vlan_acl(adapter);
872 netif_tx_start_all_queues(dev);
873 set_bit(pi->port_id, &adapter->open_device_map);
877 if (adapter->open_device_map == 0)
878 adapter_down(adapter);
883 * Shut down a net device. This routine is called "cxgb_close" in the PF
886 static int cxgb4vf_stop(struct net_device *dev)
888 struct port_info *pi = netdev_priv(dev);
889 struct adapter *adapter = pi->adapter;
891 netif_tx_stop_all_queues(dev);
892 netif_carrier_off(dev);
893 t4vf_enable_pi(adapter, pi, false, false);
895 clear_bit(pi->port_id, &adapter->open_device_map);
896 if (adapter->open_device_map == 0)
897 adapter_down(adapter);
902 * Translate our basic statistics into the standard "ifconfig" statistics.
904 static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
906 struct t4vf_port_stats stats;
907 struct port_info *pi = netdev2pinfo(dev);
908 struct adapter *adapter = pi->adapter;
909 struct net_device_stats *ns = &dev->stats;
912 spin_lock(&adapter->stats_lock);
913 err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
914 spin_unlock(&adapter->stats_lock);
916 memset(ns, 0, sizeof(*ns));
920 ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
921 stats.tx_ucast_bytes + stats.tx_offload_bytes);
922 ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
923 stats.tx_ucast_frames + stats.tx_offload_frames);
924 ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
925 stats.rx_ucast_bytes);
926 ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
927 stats.rx_ucast_frames);
928 ns->multicast = stats.rx_mcast_frames;
929 ns->tx_errors = stats.tx_drop_frames;
930 ns->rx_errors = stats.rx_err_frames;
935 static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
937 struct port_info *pi = netdev_priv(netdev);
938 struct adapter *adapter = pi->adapter;
943 bool ucast = is_unicast_ether_addr(mac_addr);
944 const u8 *maclist[1] = {mac_addr};
945 struct hash_mac_addr *new_entry;
947 ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
948 NULL, ucast ? &uhash : &mhash, false);
951 /* if hash != 0, then add the addr to the hash addr list
952 * so that in the end we can calculate the hash for the
953 * list and program it
955 if (uhash || mhash) {
956 new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
959 ether_addr_copy(new_entry->addr, mac_addr);
960 list_add_tail(&new_entry->list, &adapter->mac_hlist);
961 ret = cxgb4vf_set_addr_hash(pi);
964 return ret < 0 ? ret : 0;
967 static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
969 struct port_info *pi = netdev_priv(netdev);
970 struct adapter *adapter = pi->adapter;
972 const u8 *maclist[1] = {mac_addr};
973 struct hash_mac_addr *entry, *tmp;
975 /* If the MAC address to be removed is in the hash addr
976 * list, delete it from the list and update the hash vector
978 list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
979 if (ether_addr_equal(entry->addr, mac_addr)) {
980 list_del(&entry->list);
982 return cxgb4vf_set_addr_hash(pi);
986 ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
987 return ret < 0 ? -EINVAL : 0;
991 * Set RX properties of a port, such as promiscuity, address filters, and MTU.
992 * If @mtu is -1 it is left unchanged.
994 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
996 struct port_info *pi = netdev_priv(dev);
998 __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
999 __dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
1000 return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
1001 (dev->flags & IFF_PROMISC) != 0,
1002 (dev->flags & IFF_ALLMULTI) != 0,
1007 * Set the current receive modes on the device.
1009 static void cxgb4vf_set_rxmode(struct net_device *dev)
1011 /* unfortunately we can't return errors to the stack */
1012 set_rxmode(dev, -1, false);
1016 * Find the entry in the interrupt holdoff timer value array which comes
1017 * closest to the specified interrupt holdoff value.
1019 static int closest_timer(const struct sge *s, int us)
1021 int i, timer_idx = 0, min_delta = INT_MAX;
1023 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1024 int delta = us - s->timer_val[i];
1027 if (delta < min_delta) {
1035 static int closest_thres(const struct sge *s, int thres)
1037 int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;
1039 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1040 delta = thres - s->counter_val[i];
1043 if (delta < min_delta) {
1052 * Return a queue's interrupt hold-off time in us. 0 means no timer.
1054 static unsigned int qtimer_val(const struct adapter *adapter,
1055 const struct sge_rspq *rspq)
1057 unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);
1059 return timer_idx < SGE_NTIMERS
1060 ? adapter->sge.timer_val[timer_idx]
1065 * set_rxq_intr_params - set a queue's interrupt holdoff parameters
1066 * @adapter: the adapter
1067 * @rspq: the RX response queue
1068 * @us: the hold-off time in us, or 0 to disable timer
1069 * @cnt: the hold-off packet count, or 0 to disable counter
1071 * Sets an RX response queue's interrupt hold-off time and packet count.
1072 * At least one of the two needs to be enabled for the queue to generate
1075 static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
1076 unsigned int us, unsigned int cnt)
1078 unsigned int timer_idx;
1081 * If both the interrupt holdoff timer and count are specified as
1082 * zero, default to a holdoff count of 1 ...
1084 if ((us | cnt) == 0)
1088 * If an interrupt holdoff count has been specified, then find the
1089 * closest configured holdoff count and use that. If the response
1090 * queue has already been created, then update its queue context
1097 pktcnt_idx = closest_thres(&adapter->sge, cnt);
1098 if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
1099 v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1100 FW_PARAMS_PARAM_X_V(
1101 FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1102 FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
1103 err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
1107 rspq->pktcnt_idx = pktcnt_idx;
1111 * Compute the closest holdoff timer index from the supplied holdoff
1114 timer_idx = (us == 0
1115 ? SGE_TIMER_RSTRT_CNTR
1116 : closest_timer(&adapter->sge, us));
1119 * Update the response queue's interrupt coalescing parameters and
1122 rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
1123 QINTR_CNT_EN_V(cnt > 0));
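	/*
	 * Usage sketch (hedged): "ethtool -C ethX rx-usecs 5 rx-frames 8"
	 * lands here via cxgb4vf_set_coalesce() with us = 5 and cnt = 8;
	 * the code above picks the closest configured holdoff timer and
	 * packet-count threshold and encodes them into rspq->intr_params.
	 */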
1128 * Return a version number to identify the type of adapter. The scheme is:
1129 * - bits 0..9: chip version
1130 * - bits 10..15: chip revision
1132 static inline unsigned int mk_adap_vers(const struct adapter *adapter)
1135 * Chip version 4, revision 0x3f (cxgb4vf).
1137 return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
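	/*
	 * Worked example (assuming CHELSIO_CHIP_VERSION() yields 4 for a T4
	 * part): the returned value is 4 | (0x3f << 10) = 0xfc04, i.e. the
	 * chip version in bits 0..9 and the fixed 0x3f cxgb4vf "revision" in
	 * bits 10..15, matching the scheme described above.
	 */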
1141 * Execute the specified ioctl command.
1143 static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1149 * The VF Driver doesn't have access to any of the other
1150 * common Ethernet device ioctl()s (like reading/writing
1151 * PHY registers, etc.).
1162 * Change the device's MTU.
1164 static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
1167 struct port_info *pi = netdev_priv(dev);
1169 ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
1170 -1, -1, -1, -1, true);
1176 static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
1177 netdev_features_t features)
1180 * Since there is no support for separate RX/TX VLAN acceleration
1181 * enable/disable, make sure the TX flag is always in the same state as RX.
1183 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1184 features |= NETIF_F_HW_VLAN_CTAG_TX;
1186 features &= ~NETIF_F_HW_VLAN_CTAG_TX;
1191 static int cxgb4vf_set_features(struct net_device *dev,
1192 netdev_features_t features)
1194 struct port_info *pi = netdev_priv(dev);
1195 netdev_features_t changed = dev->features ^ features;
1197 if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1198 t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
1199 features & NETIF_F_HW_VLAN_CTAG_TX, 0);
1205 * Change the device's MAC address.
1207 static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
1210 struct sockaddr *addr = _addr;
1211 struct port_info *pi = netdev_priv(dev);
1213 if (!is_valid_ether_addr(addr->sa_data))
1214 return -EADDRNOTAVAIL;
1216 ret = cxgb4vf_change_mac(pi, pi->viid, &pi->xact_addr_filt,
1217 addr->sa_data, true);
1221 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1225 #ifdef CONFIG_NET_POLL_CONTROLLER
1227 * Poll all of our receive queues. This is called outside of normal interrupt
1230 static void cxgb4vf_poll_controller(struct net_device *dev)
1232 struct port_info *pi = netdev_priv(dev);
1233 struct adapter *adapter = pi->adapter;
1235 if (adapter->flags & CXGB4VF_USING_MSIX) {
1236 struct sge_eth_rxq *rxq;
1239 rxq = &adapter->sge.ethrxq[pi->first_qset];
1240 for (nqsets = pi->nqsets; nqsets; nqsets--) {
1241 t4vf_sge_intr_msix(0, &rxq->rspq);
1245 t4vf_intr_handler(adapter)(0, adapter);
1250 * Ethtool operations.
1251 * ===================
1253 * Note that we don't support any ethtool operations which change the physical
1254 * state of the port to which we're linked.
1258 * from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
1259 * @port_type: Firmware Port Type
1260 * @mod_type: Firmware Module Type
1262 * Translate Firmware Port/Module type to Ethtool Port Type.
1264 static int from_fw_port_mod_type(enum fw_port_type port_type,
1265 enum fw_port_module_type mod_type)
1267 if (port_type == FW_PORT_TYPE_BT_SGMII ||
1268 port_type == FW_PORT_TYPE_BT_XFI ||
1269 port_type == FW_PORT_TYPE_BT_XAUI) {
1271 } else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
1272 port_type == FW_PORT_TYPE_FIBER_XAUI) {
1274 } else if (port_type == FW_PORT_TYPE_SFP ||
1275 port_type == FW_PORT_TYPE_QSFP_10G ||
1276 port_type == FW_PORT_TYPE_QSA ||
1277 port_type == FW_PORT_TYPE_QSFP ||
1278 port_type == FW_PORT_TYPE_CR4_QSFP ||
1279 port_type == FW_PORT_TYPE_CR_QSFP ||
1280 port_type == FW_PORT_TYPE_CR2_QSFP ||
1281 port_type == FW_PORT_TYPE_SFP28) {
1282 if (mod_type == FW_PORT_MOD_TYPE_LR ||
1283 mod_type == FW_PORT_MOD_TYPE_SR ||
1284 mod_type == FW_PORT_MOD_TYPE_ER ||
1285 mod_type == FW_PORT_MOD_TYPE_LRM)
1287 else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1288 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1292 } else if (port_type == FW_PORT_TYPE_KR4_100G ||
1293 port_type == FW_PORT_TYPE_KR_SFP28 ||
1294 port_type == FW_PORT_TYPE_KR_XLAUI) {
1302 * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
1303 * @port_type: Firmware Port Type
1304 * @fw_caps: Firmware Port Capabilities
1305 * @link_mode_mask: ethtool Link Mode Mask
1307 * Translate a Firmware Port Capabilities specification to an ethtool
1310 static void fw_caps_to_lmm(enum fw_port_type port_type,
1311 unsigned int fw_caps,
1312 unsigned long *link_mode_mask)
1314 #define SET_LMM(__lmm_name) \
1315 __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
1318 #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
1320 if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
1321 SET_LMM(__lmm_name); \
1324 switch (port_type) {
1325 case FW_PORT_TYPE_BT_SGMII:
1326 case FW_PORT_TYPE_BT_XFI:
1327 case FW_PORT_TYPE_BT_XAUI:
1329 FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
1330 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1331 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1334 case FW_PORT_TYPE_KX4:
1335 case FW_PORT_TYPE_KX:
1337 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1338 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
1341 case FW_PORT_TYPE_KR:
1343 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1346 case FW_PORT_TYPE_BP_AP:
1348 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1349 FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
1350 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1353 case FW_PORT_TYPE_BP4_AP:
1355 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1356 FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
1357 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1358 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
1361 case FW_PORT_TYPE_FIBER_XFI:
1362 case FW_PORT_TYPE_FIBER_XAUI:
1363 case FW_PORT_TYPE_SFP:
1364 case FW_PORT_TYPE_QSFP_10G:
1365 case FW_PORT_TYPE_QSA:
1367 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1368 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1371 case FW_PORT_TYPE_BP40_BA:
1372 case FW_PORT_TYPE_QSFP:
1374 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1375 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1376 FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
1379 case FW_PORT_TYPE_CR_QSFP:
1380 case FW_PORT_TYPE_SFP28:
1382 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1383 FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1384 FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
1387 case FW_PORT_TYPE_KR_SFP28:
1389 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1390 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1391 FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
1394 case FW_PORT_TYPE_KR_XLAUI:
1396 FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1397 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1398 FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
1401 case FW_PORT_TYPE_CR2_QSFP:
1403 FW_CAPS_TO_LMM(SPEED_50G, 50000baseSR2_Full);
1406 case FW_PORT_TYPE_KR4_100G:
1407 case FW_PORT_TYPE_CR4_QSFP:
1409 FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1410 FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1411 FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
1412 FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
1413 FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
1414 FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full);
1421 if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
1422 FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
1423 FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
1428 FW_CAPS_TO_LMM(ANEG, Autoneg);
1429 FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
1430 FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
1432 #undef FW_CAPS_TO_LMM
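/*
 * Illustrative expansion (a sketch): for an SFP28 port whose Firmware
 * capabilities include FW_PORT_CAP32_SPEED_25G, the corresponding
 * FW_CAPS_TO_LMM() invocation above reduces to
 *
 *	__set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, link_mode_mask);
 *
 * so the ethtool core reports 25000baseCR/Full in the supported or
 * advertised link-mode mask.
 */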
1436 static int cxgb4vf_get_link_ksettings(struct net_device *dev,
1437 struct ethtool_link_ksettings *link_ksettings)
1439 struct port_info *pi = netdev_priv(dev);
1440 struct ethtool_link_settings *base = &link_ksettings->base;
1442 /* For the nonce, the Firmware doesn't send up Port State changes
1443 * when the Virtual Interface attached to the Port is down. So
1444 * if it's down, let's grab any changes.
1446 if (!netif_running(dev))
1447 (void)t4vf_update_port_info(pi);
1449 ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
1450 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
1451 ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
1453 base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
1455 if (pi->mdio_addr >= 0) {
1456 base->phy_address = pi->mdio_addr;
1457 base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
1458 ? ETH_MDIO_SUPPORTS_C22
1459 : ETH_MDIO_SUPPORTS_C45);
1461 base->phy_address = 255;
1462 base->mdio_support = 0;
1465 fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
1466 link_ksettings->link_modes.supported);
1467 fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps,
1468 link_ksettings->link_modes.advertising);
1469 fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
1470 link_ksettings->link_modes.lp_advertising);
1472 if (netif_carrier_ok(dev)) {
1473 base->speed = pi->link_cfg.speed;
1474 base->duplex = DUPLEX_FULL;
1476 base->speed = SPEED_UNKNOWN;
1477 base->duplex = DUPLEX_UNKNOWN;
1480 base->autoneg = pi->link_cfg.autoneg;
1481 if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
1482 ethtool_link_ksettings_add_link_mode(link_ksettings,
1483 supported, Autoneg);
1484 if (pi->link_cfg.autoneg)
1485 ethtool_link_ksettings_add_link_mode(link_ksettings,
1486 advertising, Autoneg);
1491 /* Translate the Firmware FEC value into the ethtool value. */
1492 static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
1494 unsigned int eth_fec = 0;
1496 if (fw_fec & FW_PORT_CAP32_FEC_RS)
1497 eth_fec |= ETHTOOL_FEC_RS;
1498 if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
1499 eth_fec |= ETHTOOL_FEC_BASER;
1501 /* if nothing is set, then FEC is off */
1503 eth_fec = ETHTOOL_FEC_OFF;
1508 /* Translate Common Code FEC value into ethtool value. */
1509 static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
1511 unsigned int eth_fec = 0;
1513 if (cc_fec & FEC_AUTO)
1514 eth_fec |= ETHTOOL_FEC_AUTO;
1515 if (cc_fec & FEC_RS)
1516 eth_fec |= ETHTOOL_FEC_RS;
1517 if (cc_fec & FEC_BASER_RS)
1518 eth_fec |= ETHTOOL_FEC_BASER;
1520 /* if nothing is set, then FEC is off */
1522 eth_fec = ETHTOOL_FEC_OFF;
1527 static int cxgb4vf_get_fecparam(struct net_device *dev,
1528 struct ethtool_fecparam *fec)
1530 const struct port_info *pi = netdev_priv(dev);
1531 const struct link_config *lc = &pi->link_cfg;
1533 /* Translate the Firmware FEC Support into the ethtool value. We
1534 * always support IEEE 802.3 "automatic" selection of Link FEC type if
1535 * any FEC is supported.
1537 fec->fec = fwcap_to_eth_fec(lc->pcaps);
1538 if (fec->fec != ETHTOOL_FEC_OFF)
1539 fec->fec |= ETHTOOL_FEC_AUTO;
1541 /* Translate the current internal FEC parameters into the
1544 fec->active_fec = cc_to_eth_fec(lc->fec);
1549 * Return our driver information.
1551 static void cxgb4vf_get_drvinfo(struct net_device *dev,
1552 struct ethtool_drvinfo *drvinfo)
1554 struct adapter *adapter = netdev2adap(dev);
1556 strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
1557 strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
1558 sizeof(drvinfo->bus_info));
1559 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1560 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1561 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
1562 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
1563 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
1564 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
1565 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
1566 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
1567 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
1568 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
1572 * Return current adapter message level.
1574 static u32 cxgb4vf_get_msglevel(struct net_device *dev)
1576 return netdev2adap(dev)->msg_enable;
1580 * Set current adapter message level.
1582 static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
1584 netdev2adap(dev)->msg_enable = msglevel;
1588 * Return the device's current Queue Set ring size parameters along with the
1589 * allowed maximum values. Since ethtool doesn't understand the concept of
1590 * multi-queue devices, we just return the current values associated with the
1593 static void cxgb4vf_get_ringparam(struct net_device *dev,
1594 struct ethtool_ringparam *rp)
1596 const struct port_info *pi = netdev_priv(dev);
1597 const struct sge *s = &pi->adapter->sge;
1599 rp->rx_max_pending = MAX_RX_BUFFERS;
1600 rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1601 rp->rx_jumbo_max_pending = 0;
1602 rp->tx_max_pending = MAX_TXQ_ENTRIES;
1604 rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
1605 rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1606 rp->rx_jumbo_pending = 0;
1607 rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
1611 * Set the Queue Set ring size parameters for the device. Again, since
1612 * ethtool doesn't allow for the concept of multiple queues per device, we'll
1613 * apply these new values across all of the Queue Sets associated with the
1614 * device -- after vetting them of course!
1616 static int cxgb4vf_set_ringparam(struct net_device *dev,
1617 struct ethtool_ringparam *rp)
1619 const struct port_info *pi = netdev_priv(dev);
1620 struct adapter *adapter = pi->adapter;
1621 struct sge *s = &adapter->sge;
1624 if (rp->rx_pending > MAX_RX_BUFFERS ||
1625 rp->rx_jumbo_pending ||
1626 rp->tx_pending > MAX_TXQ_ENTRIES ||
1627 rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1628 rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1629 rp->rx_pending < MIN_FL_ENTRIES ||
1630 rp->tx_pending < MIN_TXQ_ENTRIES)
1633 if (adapter->flags & CXGB4VF_FULL_INIT_DONE)
1636 for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
1637 s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
1638 s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
1639 s->ethtxq[qs].q.size = rp->tx_pending;
1645 * Return the interrupt holdoff timer and count for the first Queue Set on the
1646 * device. Our extension ioctl() (the cxgbtool interface) allows the
1647 * interrupt holdoff timer to be read on all of the device's Queue Sets.
1649 static int cxgb4vf_get_coalesce(struct net_device *dev,
1650 struct ethtool_coalesce *coalesce)
1652 const struct port_info *pi = netdev_priv(dev);
1653 const struct adapter *adapter = pi->adapter;
1654 const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
1656 coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
1657 coalesce->rx_max_coalesced_frames =
1658 ((rspq->intr_params & QINTR_CNT_EN_F)
1659 ? adapter->sge.counter_val[rspq->pktcnt_idx]
1665 * Set the RX interrupt holdoff timer and count for the first Queue Set on the
1666 * interface. Our extension ioctl() (the cxgbtool interface) allows us to set
1667 * the interrupt holdoff timer on any of the device's Queue Sets.
1669 static int cxgb4vf_set_coalesce(struct net_device *dev,
1670 struct ethtool_coalesce *coalesce)
1672 const struct port_info *pi = netdev_priv(dev);
1673 struct adapter *adapter = pi->adapter;
1675 return set_rxq_intr_params(adapter,
1676 &adapter->sge.ethrxq[pi->first_qset].rspq,
1677 coalesce->rx_coalesce_usecs,
1678 coalesce->rx_max_coalesced_frames);
1682 * Report current port link pause parameter settings.
1684 static void cxgb4vf_get_pauseparam(struct net_device *dev,
1685 struct ethtool_pauseparam *pauseparam)
1687 struct port_info *pi = netdev_priv(dev);
1689 pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1690 pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0;
1691 pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0;
1695 * Identify the port by blinking the port's LED.
1697 static int cxgb4vf_phys_id(struct net_device *dev,
1698 enum ethtool_phys_id_state state)
1701 struct port_info *pi = netdev_priv(dev);
1703 if (state == ETHTOOL_ID_ACTIVE)
1705 else if (state == ETHTOOL_ID_INACTIVE)
1710 return t4vf_identify_port(pi->adapter, pi->viid, val);
1714 * Port stats maintained per queue of the port.
1716 struct queue_port_stats {
1727 * Strings for the ETH_SS_STATS statistics set ("ethtool -S"). Note that
1728 * these need to match the order of statistics returned by
1729 * t4vf_get_port_stats().
1731 static const char stats_strings[][ETH_GSTRING_LEN] = {
1733 * These must match the layout of the t4vf_port_stats structure.
1735 "TxBroadcastBytes ",
1736 "TxBroadcastFrames ",
1737 "TxMulticastBytes ",
1738 "TxMulticastFrames ",
1744 "RxBroadcastBytes ",
1745 "RxBroadcastFrames ",
1746 "RxMulticastBytes ",
1747 "RxMulticastFrames ",
1753 * These are accumulated per-queue statistics and must match the
1754 * order of the fields in the queue_port_stats structure.
1766 * Return the number of statistics in the specified statistics set.
1768 static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
1772 return ARRAY_SIZE(stats_strings);
1780 * Return the strings for the specified statistics set.
1782 static void cxgb4vf_get_strings(struct net_device *dev,
1788 memcpy(data, stats_strings, sizeof(stats_strings));
1794 * Small utility routine to accumulate queue statistics across the queues of
1797 static void collect_sge_port_stats(const struct adapter *adapter,
1798 const struct port_info *pi,
1799 struct queue_port_stats *stats)
1801 const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
1802 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
1805 memset(stats, 0, sizeof(*stats));
1806 for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
1807 stats->tso += txq->tso;
1808 stats->tx_csum += txq->tx_cso;
1809 stats->rx_csum += rxq->stats.rx_cso;
1810 stats->vlan_ex += rxq->stats.vlan_ex;
1811 stats->vlan_ins += txq->vlan_ins;
1812 stats->lro_pkts += rxq->stats.lro_pkts;
1813 stats->lro_merged += rxq->stats.lro_merged;
1818 * Return the ETH_SS_STATS statistics set.
1820 static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
1821 struct ethtool_stats *stats,
1824 struct port_info *pi = netdev2pinfo(dev);
1825 struct adapter *adapter = pi->adapter;
1826 int err = t4vf_get_port_stats(adapter, pi->pidx,
1827 (struct t4vf_port_stats *)data);
1829 memset(data, 0, sizeof(struct t4vf_port_stats));
1831 data += sizeof(struct t4vf_port_stats) / sizeof(u64);
1832 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1836 * Return the size of our register map.
1838 static int cxgb4vf_get_regs_len(struct net_device *dev)
1840 return T4VF_REGMAP_SIZE;
1844 * Dump a block of registers, start to end inclusive, into a buffer.
1846 static void reg_block_dump(struct adapter *adapter, void *regbuf,
1847 unsigned int start, unsigned int end)
1849 u32 *bp = regbuf + start - T4VF_REGMAP_START;
1851 for ( ; start <= end; start += sizeof(u32)) {
1853 * Avoid reading the Mailbox Control register since that
1854 * can trigger a Mailbox Ownership Arbitration cycle and
1855 * interfere with communication with the firmware.
1857 if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
1860 *bp++ = t4_read_reg(adapter, start);
1865 * Copy our entire register map into the provided buffer.
1867 static void cxgb4vf_get_regs(struct net_device *dev,
1868 struct ethtool_regs *regs,
1871 struct adapter *adapter = netdev2adap(dev);
1873 regs->version = mk_adap_vers(adapter);
1876 * Fill in register buffer with our register map.
1878 memset(regbuf, 0, T4VF_REGMAP_SIZE);
1880 reg_block_dump(adapter, regbuf,
1881 T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
1882 T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
1883 reg_block_dump(adapter, regbuf,
1884 T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
1885 T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
1887 /* T5 adds new registers in the PL Register map.
1889 reg_block_dump(adapter, regbuf,
1890 T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
1891 T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
1892 ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
1893 reg_block_dump(adapter, regbuf,
1894 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
1895 T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
1897 reg_block_dump(adapter, regbuf,
1898 T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
1899 T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
1903 * Report current Wake On LAN settings.
1905 static void cxgb4vf_get_wol(struct net_device *dev,
1906 struct ethtool_wolinfo *wol)
1910 memset(&wol->sopass, 0, sizeof(wol->sopass));
1914 * TCP Segmentation Offload flags which we support.
1916 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1917 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
1918 NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
1920 static const struct ethtool_ops cxgb4vf_ethtool_ops = {
1921 .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
1922 ETHTOOL_COALESCE_RX_MAX_FRAMES,
1923 .get_link_ksettings = cxgb4vf_get_link_ksettings,
1924 .get_fecparam = cxgb4vf_get_fecparam,
1925 .get_drvinfo = cxgb4vf_get_drvinfo,
1926 .get_msglevel = cxgb4vf_get_msglevel,
1927 .set_msglevel = cxgb4vf_set_msglevel,
1928 .get_ringparam = cxgb4vf_get_ringparam,
1929 .set_ringparam = cxgb4vf_set_ringparam,
1930 .get_coalesce = cxgb4vf_get_coalesce,
1931 .set_coalesce = cxgb4vf_set_coalesce,
1932 .get_pauseparam = cxgb4vf_get_pauseparam,
1933 .get_link = ethtool_op_get_link,
1934 .get_strings = cxgb4vf_get_strings,
1935 .set_phys_id = cxgb4vf_phys_id,
1936 .get_sset_count = cxgb4vf_get_sset_count,
1937 .get_ethtool_stats = cxgb4vf_get_ethtool_stats,
1938 .get_regs_len = cxgb4vf_get_regs_len,
1939 .get_regs = cxgb4vf_get_regs,
1940 .get_wol = cxgb4vf_get_wol,
1944 * /sys/kernel/debug/cxgb4vf support code and data.
1945 * ================================================
1949 * Show Firmware Mailbox Command/Reply Log
1951 * Note that we don't do any locking when dumping the Firmware Mailbox Log so
1952 * it's possible that we can catch things during a log update and therefore
1953 * see partially corrupted log entries. But it's probably Good Enough(tm).
1954 * If we ever decide that we want to make sure that we're dumping a coherent
1955 * log, we'd need to perform locking in the mailbox logging and in
1956 * mboxlog_open() where we'd need to grab the entire mailbox log in one go
1957 * like we do for the Firmware Device Log. But as stated above, meh ...
1959 static int mboxlog_show(struct seq_file *seq, void *v)
1961 struct adapter *adapter = seq->private;
1962 struct mbox_cmd_log *log = adapter->mbox_log;
1963 struct mbox_cmd *entry;
1966 if (v == SEQ_START_TOKEN) {
1968 "%10s %15s %5s %5s %s\n",
1969 "Seq#", "Tstamp", "Atime", "Etime",
1974 entry_idx = log->cursor + ((uintptr_t)v - 2);
1975 if (entry_idx >= log->size)
1976 entry_idx -= log->size;
1977 entry = mbox_cmd_log_entry(log, entry_idx);
1979 /* skip over unused entries */
1980 if (entry->timestamp == 0)
1983 seq_printf(seq, "%10u %15llu %5d %5d",
1984 entry->seqno, entry->timestamp,
1985 entry->access, entry->execute);
1986 for (i = 0; i < MBOX_LEN / 8; i++) {
1987 u64 flit = entry->cmd[i];
1988 u32 hi = (u32)(flit >> 32);
1991 seq_printf(seq, " %08x %08x", hi, lo);
1993 seq_puts(seq, "\n");
1997 static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
1999 struct adapter *adapter = seq->private;
2000 struct mbox_cmd_log *log = adapter->mbox_log;
2002 return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
2005 static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
2007 return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
2010 static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
2013 return mboxlog_get_idx(seq, *pos);
2016 static void mboxlog_stop(struct seq_file *seq, void *v)
2020 static const struct seq_operations mboxlog_seq_ops = {
2021 .start = mboxlog_start,
2022 .next = mboxlog_next,
2023 .stop = mboxlog_stop,
2024 .show = mboxlog_show
2027 static int mboxlog_open(struct inode *inode, struct file *file)
2029 int res = seq_open(file, &mboxlog_seq_ops);
2032 struct seq_file *seq = file->private_data;
2034 seq->private = inode->i_private;
2039 static const struct file_operations mboxlog_fops = {
2040 .owner = THIS_MODULE,
2041 .open = mboxlog_open,
2043 .llseek = seq_lseek,
2044 .release = seq_release,
2048 * Show SGE Queue Set information. We display QPL Queue Sets per line.
2052 static int sge_qinfo_show(struct seq_file *seq, void *v)
2054 struct adapter *adapter = seq->private;
2055 int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
2056 int qs, r = (uintptr_t)v - 1;
2059 seq_putc(seq, '\n');
2061 #define S3(fmt_spec, s, v) \
2063 seq_printf(seq, "%-12s", s); \
2064 for (qs = 0; qs < n; ++qs) \
2065 seq_printf(seq, " %16" fmt_spec, v); \
2066 seq_putc(seq, '\n'); \
2068 #define S(s, v) S3("s", s, v)
2069 #define T(s, v) S3("u", s, txq[qs].v)
2070 #define R(s, v) S3("u", s, rxq[qs].v)
2072 if (r < eth_entries) {
2073 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
2074 const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
2075 int n = min(QPL, adapter->sge.ethqsets - QPL * r);
2077 S("QType:", "Ethernet");
2079 (rxq[qs].rspq.netdev
2080 ? rxq[qs].rspq.netdev->name
2083 (rxq[qs].rspq.netdev
2084 ? ((struct port_info *)
2085 netdev_priv(rxq[qs].rspq.netdev))->port_id
2087 T("TxQ ID:", q.abs_id);
2088 T("TxQ size:", q.size);
2089 T("TxQ inuse:", q.in_use);
2090 T("TxQ PIdx:", q.pidx);
2091 T("TxQ CIdx:", q.cidx);
2092 R("RspQ ID:", rspq.abs_id);
2093 R("RspQ size:", rspq.size);
2094 R("RspQE size:", rspq.iqe_len);
2095 S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
2096 S3("u", "Intr pktcnt:",
2097 adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
2098 R("RspQ CIdx:", rspq.cidx);
2099 R("RspQ Gen:", rspq.gen);
2100 R("FL ID:", fl.abs_id);
2101 R("FL size:", fl.size - MIN_FL_RESID);
2102 R("FL avail:", fl.avail);
2103 R("FL PIdx:", fl.pidx);
2104 R("FL CIdx:", fl.cidx);
2110 const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
2112 seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
2113 seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
2114 seq_printf(seq, "%-12s %16u\n", "Intr delay:",
2115 qtimer_val(adapter, evtq));
2116 seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
2117 adapter->sge.counter_val[evtq->pktcnt_idx]);
2118 seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
2119 seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
2120 } else if (r == 1) {
2121 const struct sge_rspq *intrq = &adapter->sge.intrq;
2123 seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
2124 seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
2125 seq_printf(seq, "%-12s %16u\n", "Intr delay:",
2126 qtimer_val(adapter, intrq));
2127 seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
2128 adapter->sge.counter_val[intrq->pktcnt_idx]);
2129 seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
2130 seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
2142 * Return the number of "entries" in our "file". We group the multi-Queue
2143 * sections with QPL Queue Sets per "entry". The sections of the output are:
2145 * Ethernet RX/TX Queue Sets
2146 * Firmware Event Queue
2147 * Forwarded Interrupt Queue (if in MSI mode)
2149 static int sge_queue_entries(const struct adapter *adapter)
2151 return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2152 ((adapter->flags & CXGB4VF_USING_MSI) != 0);
2155 static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
2157 int entries = sge_queue_entries(seq->private);
2159 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2162 static void sge_queue_stop(struct seq_file *seq, void *v)
2166 static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
2168 int entries = sge_queue_entries(seq->private);
2171 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2174 static const struct seq_operations sge_qinfo_seq_ops = {
2175 .start = sge_queue_start,
2176 .next = sge_queue_next,
2177 .stop = sge_queue_stop,
2178 .show = sge_qinfo_show
2181 static int sge_qinfo_open(struct inode *inode, struct file *file)
2183 int res = seq_open(file, &sge_qinfo_seq_ops);
2186 struct seq_file *seq = file->private_data;
2187 seq->private = inode->i_private;
2192 static const struct file_operations sge_qinfo_debugfs_fops = {
2193 .owner = THIS_MODULE,
2194 .open = sge_qinfo_open,
2196 .llseek = seq_lseek,
2197 .release = seq_release,
2201 * Show SGE Queue Set statistics. We display QPL Queue Sets per line.
2205 static int sge_qstats_show(struct seq_file *seq, void *v)
2207 struct adapter *adapter = seq->private;
2208 int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
2209 int qs, r = (uintptr_t)v - 1;
2212 seq_putc(seq, '\n');
2214 #define S3(fmt, s, v) \
2216 seq_printf(seq, "%-16s", s); \
2217 for (qs = 0; qs < n; ++qs) \
2218 seq_printf(seq, " %8" fmt, v); \
2219 seq_putc(seq, '\n'); \
2221 #define S(s, v) S3("s", s, v)
2223 #define T3(fmt, s, v) S3(fmt, s, txq[qs].v)
2224 #define T(s, v) T3("lu", s, v)
2226 #define R3(fmt, s, v) S3(fmt, s, rxq[qs].v)
2227 #define R(s, v) R3("lu", s, v)
2229 if (r < eth_entries) {
2230 const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
2231 const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
2232 int n = min(QPL, adapter->sge.ethqsets - QPL * r);
2234 S("QType:", "Ethernet");
2236 (rxq[qs].rspq.netdev
2237 ? rxq[qs].rspq.netdev->name
2239 R3("u", "RspQNullInts:", rspq.unhandled_irqs);
2240 R("RxPackets:", stats.pkts);
2241 R("RxCSO:", stats.rx_cso);
2242 R("VLANxtract:", stats.vlan_ex);
2243 R("LROmerged:", stats.lro_merged);
2244 R("LROpackets:", stats.lro_pkts);
2245 R("RxDrops:", stats.rx_drops);
2247 T("TxCSO:", tx_cso);
2248 T("VLANins:", vlan_ins);
2249 T("TxQFull:", q.stops);
2250 T("TxQRestarts:", q.restarts);
2251 T("TxMapErr:", mapping_err);
2252 R("FLAllocErr:", fl.alloc_failed);
2253 R("FLLrgAlcErr:", fl.large_alloc_failed);
2254 R("FLStarving:", fl.starving);
2260 const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
2262 seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
2263 seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2264 evtq->unhandled_irqs);
2265 seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
2266 seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
2267 } else if (r == 1) {
2268 const struct sge_rspq *intrq = &adapter->sge.intrq;
2270 seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
2271 seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2272 intrq->unhandled_irqs);
2273 seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
2274 seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
2288 * Return the number of "entries" in our "file". We group the multi-Queue
2289 * sections with QPL Queue Sets per "entry". The sections of the output are:
2291 * Ethernet RX/TX Queue Sets
2292 * Firmware Event Queue
2293 * Forwarded Interrupt Queue (if in MSI mode)
2295 static int sge_qstats_entries(const struct adapter *adapter)
2297 return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2298 ((adapter->flags & CXGB4VF_USING_MSI) != 0);
2301 static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
2303 int entries = sge_qstats_entries(seq->private);
2305 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2308 static void sge_qstats_stop(struct seq_file *seq, void *v)
2312 static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
2314 int entries = sge_qstats_entries(seq->private);
2317 return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2320 static const struct seq_operations sge_qstats_seq_ops = {
2321 .start = sge_qstats_start,
2322 .next = sge_qstats_next,
2323 .stop = sge_qstats_stop,
2324 .show = sge_qstats_show
2327 static int sge_qstats_open(struct inode *inode, struct file *file)
2329 int res = seq_open(file, &sge_qstats_seq_ops);
2332 struct seq_file *seq = file->private_data;
2333 seq->private = inode->i_private;
2338 static const struct file_operations sge_qstats_proc_fops = {
2339 .owner = THIS_MODULE,
2340 .open = sge_qstats_open,
2342 .llseek = seq_lseek,
2343 .release = seq_release,
2347 * Show PCI-E SR-IOV Virtual Function Resource Limits.
2349 static int resources_show(struct seq_file *seq, void *v)
2351 struct adapter *adapter = seq->private;
2352 struct vf_resources *vfres = &adapter->params.vfres;
2354 #define S(desc, fmt, var) \
2355 seq_printf(seq, "%-60s " fmt "\n", \
2356 desc " (" #var "):", vfres->var)
2358 S("Virtual Interfaces", "%d", nvi);
2359 S("Egress Queues", "%d", neq);
2360 S("Ethernet Control", "%d", nethctrl);
2361 S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
2362 S("Ingress Queues", "%d", niq);
2363 S("Traffic Class", "%d", tc);
2364 S("Port Access Rights Mask", "%#x", pmask);
2365 S("MAC Address Filters", "%d", nexactf);
2366 S("Firmware Command Read Capabilities", "%#x", r_caps);
2367 S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
2373 DEFINE_SHOW_ATTRIBUTE(resources);
2376 * Show Virtual Interfaces.
2378 static int interfaces_show(struct seq_file *seq, void *v)
2380 if (v == SEQ_START_TOKEN) {
2381 seq_puts(seq, "Interface Port VIID\n");
2383 struct adapter *adapter = seq->private;
2384 int pidx = (uintptr_t)v - 2;
2385 struct net_device *dev = adapter->port[pidx];
2386 struct port_info *pi = netdev_priv(dev);
2388 seq_printf(seq, "%9s %4d %#5x\n",
2389 dev->name, pi->port_id, pi->viid);
2394 static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
2396 return pos <= adapter->params.nports
2397 ? (void *)(uintptr_t)(pos + 1)
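/*
 * Position encoding: position 0 maps to (void *)1, which is SEQ_START_TOKEN
 * and makes interfaces_show() print the header line; position N >= 1 maps to
 * (void *)(N + 1), from which interfaces_show() recovers the port index as
 * pidx = (uintptr_t)v - 2. So position 1 shows port 0, and so on up to
 * adapter->params.nports.
 */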
2401 static void *interfaces_start(struct seq_file *seq, loff_t *pos)
2404 ? interfaces_get_idx(seq->private, *pos)
2408 static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
2411 return interfaces_get_idx(seq->private, *pos);
2414 static void interfaces_stop(struct seq_file *seq, void *v)
2418 static const struct seq_operations interfaces_seq_ops = {
2419 .start = interfaces_start,
2420 .next = interfaces_next,
2421 .stop = interfaces_stop,
2422 .show = interfaces_show
2425 static int interfaces_open(struct inode *inode, struct file *file)
2427 int res = seq_open(file, &interfaces_seq_ops);
2430 struct seq_file *seq = file->private_data;
2431 seq->private = inode->i_private;
2436 static const struct file_operations interfaces_proc_fops = {
2437 .owner = THIS_MODULE,
2438 .open = interfaces_open,
2440 .llseek = seq_lseek,
2441 .release = seq_release,
2445 * /sys/kernel/debug/cxgb4vf/ files list.
2447 struct cxgb4vf_debugfs_entry {
2448 const char *name; /* name of debugfs node */
2449 umode_t mode; /* file system mode */
2450 const struct file_operations *fops;
2453 static struct cxgb4vf_debugfs_entry debugfs_files[] = {
2454 { "mboxlog", 0444, &mboxlog_fops },
2455 { "sge_qinfo", 0444, &sge_qinfo_debugfs_fops },
2456 { "sge_qstats", 0444, &sge_qstats_proc_fops },
2457 { "resources", 0444, &resources_fops },
2458 { "interfaces", 0444, &interfaces_proc_fops },
2462 * Module and device initialization and cleanup code.
2463 * ==================================================
2467 * Set up our /sys/kernel/debug/cxgb4vf sub-nodes. We assume that the
2468 * directory (debugfs_root) has already been set up.
2470 static int setup_debugfs(struct adapter *adapter)
2474 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2477 * Debugfs support is best effort.
2479 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
2480 debugfs_create_file(debugfs_files[i].name,
2481 debugfs_files[i].mode,
2482 adapter->debugfs_root, adapter,
2483 debugfs_files[i].fops);
2489 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
2490 * it to our caller to tear down the directory (debugfs_root).
2492 static void cleanup_debugfs(struct adapter *adapter)
2494 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2497 * Unlike our sister routine cleanup_proc(), we don't need to remove
2498 * individual entries because a call will be made to
2499 * debugfs_remove_recursive(). We just need to clean up any ancillary
2505 /* Figure out how many Ports and Queue Sets we can support. This depends on
2506 * knowing our Virtual Function Resources and may be called a second time if
2507 * we fall back from MSI-X to MSI Interrupt Mode.
2509 static void size_nports_qsets(struct adapter *adapter)
2511 struct vf_resources *vfres = &adapter->params.vfres;
2512 unsigned int ethqsets, pmask_nports;
2514 /* The number of "ports" which we support is equal to the number of
2515 * Virtual Interfaces with which we've been provisioned.
2517 adapter->params.nports = vfres->nvi;
2518 if (adapter->params.nports > MAX_NPORTS) {
2519 dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
2520 " allowed virtual interfaces\n", MAX_NPORTS,
2521 adapter->params.nports);
2522 adapter->params.nports = MAX_NPORTS;
2525 /* We may have been provisioned with more VIs than the number of
2526 * ports we're allowed to access (our Port Access Rights Mask).
2527 * This is obviously a configuration conflict but we don't want to
2528 * crash the kernel or anything silly just because of that.
2530 pmask_nports = hweight32(adapter->params.vfres.pmask);
2531 if (pmask_nports < adapter->params.nports) {
2532 dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
2533 " virtual interfaces; limited by Port Access Rights"
2534 " mask %#x\n", pmask_nports, adapter->params.nports,
2535 adapter->params.vfres.pmask);
2536 adapter->params.nports = pmask_nports;
2539 /* We need to reserve an Ingress Queue for the Asynchronous Firmware
2540 * Event Queue. And if we're using MSI Interrupts, we'll also need to
2541 * reserve an Ingress Queue for a Forwarded Interrupt Queue.
2543 * The rest of the FL/Intr-capable ingress queues will be matched up
2544 * one-for-one with Ethernet/Control egress queues in order to form
2545 * "Queue Sets" which will be aportioned between the "ports". For
2546 * each Queue Set, we'll need the ability to allocate two Egress
2547 * Contexts -- one for the Ingress Queue Free List and one for the TX
2550 * Note that even if we're currently configured to use MSI-X
2551 * Interrupts (module variable msi == MSI_MSIX) we may get downgraded
2552 * to MSI Interrupts if we can't get enough MSI-X Interrupts. If that
2553 * happens we'll need to adjust things later.
2555 ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI);
2556 if (vfres->nethctrl != ethqsets)
2557 ethqsets = min(vfres->nethctrl, ethqsets);
2558 if (vfres->neq < ethqsets*2)
2559 ethqsets = vfres->neq/2;
2560 if (ethqsets > MAX_ETH_QSETS)
2561 ethqsets = MAX_ETH_QSETS;
2562 adapter->sge.max_ethqsets = ethqsets;
2564 if (adapter->sge.max_ethqsets < adapter->params.nports) {
2565 dev_warn(adapter->pdev_dev, "only using %d of %d available"
2566 " virtual interfaces (too few Queue Sets)\n",
2567 adapter->sge.max_ethqsets, adapter->params.nports);
2568 adapter->params.nports = adapter->sge.max_ethqsets;
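/*
 * A worked example of the Queue Set sizing above, with purely hypothetical
 * VF resources: niqflint = 10, nethctrl = 8, neq = 16 and MSI-X in use gives
 * 10 - 1 - 0 = 9 candidate Queue Sets, clamped to the 8 Ethernet control
 * queues and then checked against neq/2 = 8 pairs of Egress Contexts, so
 * max_ethqsets ends up as 8 (assuming MAX_ETH_QSETS is at least 8). If the
 * VF had been provisioned with more Virtual Interfaces than that, nports
 * would be trimmed to match.
 */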
2573 * Perform early "adapter" initialization. This is where we discover what
2574 * adapter parameters we're going to be using and initialize basic adapter
2577 static int adap_init0(struct adapter *adapter)
2579 struct sge_params *sge_params = &adapter->params.sge;
2580 struct sge *s = &adapter->sge;
2585 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2586 * 2.6.31 and later we can't call pci_reset_function() in order to
2587 * issue an FLR because of a self-deadlock on the device semaphore.
2588 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2589 * cases where they're needed -- for instance, some versions of KVM
2590 * fail to reset "Assigned Devices" when the VM reboots. Therefore we
2591 * use the firmware based reset in order to reset any per function
2594 err = t4vf_fw_reset(adapter);
2596 dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2601 * Grab basic operational parameters. These will predominantly have
2602 * been set up by the Physical Function Driver or will be hard coded
2603 * into the adapter. We just have to live with them ... Note that
2604 * we _must_ get our VPD parameters before our SGE parameters because
2605 * we need to know the adapter's core clock from the VPD in order to
2606 * properly decode the SGE Timer Values.
2608 err = t4vf_get_dev_params(adapter);
2610 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2611 " device parameters: err=%d\n", err);
2614 err = t4vf_get_vpd_params(adapter);
2616 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2617 " VPD parameters: err=%d\n", err);
2620 err = t4vf_get_sge_params(adapter);
2622 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2623 " SGE parameters: err=%d\n", err);
2626 err = t4vf_get_rss_glb_config(adapter);
2628 dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2629 " RSS parameters: err=%d\n", err);
2632 if (adapter->params.rss.mode !=
2633 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2634 dev_err(adapter->pdev_dev, "unable to operate with global RSS"
2635 " mode %d\n", adapter->params.rss.mode);
2638 err = t4vf_sge_init(adapter);
2640 dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
2645 /* If we're running on newer firmware, let it know that we're
2646 * prepared to deal with encapsulated CPL messages. Older
2647 * firmware won't understand this and we'll just get
2648 * unencapsulated messages ...
2650 param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2651 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
2653 (void) t4vf_set_params(adapter, 1, ¶m, &val);
2656 * Retrieve our RX interrupt holdoff timer values and counter
2657 * threshold values from the SGE parameters.
2659 s->timer_val[0] = core_ticks_to_us(adapter,
2660 TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
2661 s->timer_val[1] = core_ticks_to_us(adapter,
2662 TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
2663 s->timer_val[2] = core_ticks_to_us(adapter,
2664 TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
2665 s->timer_val[3] = core_ticks_to_us(adapter,
2666 TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
2667 s->timer_val[4] = core_ticks_to_us(adapter,
2668 TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
2669 s->timer_val[5] = core_ticks_to_us(adapter,
2670 TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
2672 s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
2673 s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
2674 s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
2675 s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
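	/*
	 * core_ticks_to_us() converts these raw SGE timer values, which are
	 * in units of core clock ticks, into microseconds using the core
	 * clock frequency obtained from the VPD (which is why the VPD
	 * parameters had to be read before the SGE parameters). Purely as an
	 * illustration: with a hypothetical 250 MHz core clock, a raw timer
	 * value of 250 ticks corresponds to 1 us and 25000 ticks to 100 us.
	 */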
2678 * Grab our Virtual Interface resource allocation, extract the
2679 * features that we're interested in and do a bit of sanity testing on
2682 err = t4vf_get_vfres(adapter);
2684 dev_err(adapter->pdev_dev, "unable to get virtual interface"
2685 " resources: err=%d\n", err);
2689 /* Check for various parameter sanity issues */
2690 if (adapter->params.vfres.pmask == 0) {
2691 dev_err(adapter->pdev_dev, "no port access configured\n"
2695 if (adapter->params.vfres.nvi == 0) {
2696 dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
2701 /* Initialize nports and max_ethqsets now that we have our Virtual
2702 * Function Resources.
2704 size_nports_qsets(adapter);
2706 adapter->flags |= CXGB4VF_FW_OK;
2710 static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
2711 u8 pkt_cnt_idx, unsigned int size,
2712 unsigned int iqe_size)
2714 rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
2715 (pkt_cnt_idx < SGE_NCOUNTERS ?
2716 QINTR_CNT_EN_F : 0));
2717 rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
2720 rspq->iqe_len = iqe_size;
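	/*
	 * An illustrative call (hypothetical arguments): init_rspq(rspq, 5,
	 * 1, 1024, 64) selects holdoff timer index 5, enables the interrupt
	 * packet counter with counter index 1 (since 1 < SGE_NCOUNTERS), and
	 * requests a 1024-entry queue of 64-byte entries. A pkt_cnt_idx of
	 * SGE_NCOUNTERS or more leaves QINTR_CNT_EN_F clear, so that queue
	 * interrupts on the holdoff timer alone.
	 */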
2725 * Perform default configuration of DMA queues depending on the number and
2726 * type of ports we found and the number of available CPUs. Most settings can
2727 * be modified by the admin via ethtool and cxgbtool prior to the adapter
2728 * being brought up for the first time.
2730 static void cfg_queues(struct adapter *adapter)
2732 struct sge *s = &adapter->sge;
2733 int q10g, n10g, qidx, pidx, qs;
2737 * We should not be called till we know how many Queue Sets we can
2738 * support. In particular, this means that we need to know what kind
2739 * of interrupts we'll be using ...
2741 BUG_ON((adapter->flags &
2742 (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
2745 * Count the number of 10GbE Virtual Interfaces that we have.
2748 for_each_port(adapter, pidx)
2749 n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
2752 * We default to 1 queue per non-10G port and up to # of cores queues
2758 int n1g = (adapter->params.nports - n10g);
2759 q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
2760 if (q10g > num_online_cpus())
2761 q10g = num_online_cpus();
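	/*
	 * A hypothetical example of the split above: with 2 x 10G ports,
	 * 2 x 1G ports, max_ethqsets = 16 and 8 online CPUs, each 1G port
	 * gets one Queue Set and q10g = (16 - 2) / 2 = 7 Queue Sets per
	 * 10G port (already under the 8-CPU cap), for 2*7 + 2*1 = 16 Queue
	 * Sets in total.
	 */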
2765 * Allocate the "Queue Sets" to the various Virtual Interfaces.
2766 * The layout will be established in setup_sge_queues() when the
2767 * adapter is brought up for the first time.
2770 for_each_port(adapter, pidx) {
2771 struct port_info *pi = adap2pinfo(adapter, pidx);
2773 pi->first_qset = qidx;
2774 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
2780 * The Ingress Queue Entry Size for our various Response Queues needs
2781 * to be big enough to accommodate the largest message we can receive
2782 * from the chip/firmware; which is 64 bytes ...
2787 * Set up default Queue Set parameters ... Start off with the
2788 * shortest interrupt holdoff timer.
2790 for (qs = 0; qs < s->max_ethqsets; qs++) {
2791 struct sge_eth_rxq *rxq = &s->ethrxq[qs];
2792 struct sge_eth_txq *txq = &s->ethtxq[qs];
2794 init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
2800 * The firmware event queue is used for link state changes and
2801 * notifications of TX DMA completions.
2803 init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
2806 * The forwarded interrupt queue is used when we're in MSI interrupt
2807 * mode. In this mode all interrupts associated with RX queues will
2808 * be forwarded to a single queue which we'll associate with our MSI
2809 * interrupt vector. The messages dropped in the forwarded interrupt
2810 * queue will indicate which ingress queue needs servicing ... This
2811 * queue needs to be large enough to accommodate all of the ingress
2812 * queues which are forwarding their interrupt (+1 to prevent the PIDX
2813 * from equalling the CIDX if every ingress queue has an outstanding
2814 * interrupt). The queue doesn't need to be any larger because no
2815 * ingress queue will ever have more than one outstanding interrupt at
2818 init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
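	/*
	 * For example (hypothetical values): with MSIX_ENTRIES of, say, 33,
	 * the Forwarded Interrupt Queue is sized at 34 entries, so even if
	 * every possible ingress queue posted its one outstanding interrupt
	 * message simultaneously, the Producer Index could never wrap onto
	 * the Consumer Index and make the queue look empty.
	 */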
2823 * Reduce the number of Ethernet queues across all ports to at most n.
2824 * The caller must ensure n is at least the number of ports, so that each port keeps at least one queue.
2826 static void reduce_ethqs(struct adapter *adapter, int n)
2829 struct port_info *pi;
2832 * While we have too many active Ethernet Queue Sets, iterate across the
2833 * "ports" and reduce their individual Queue Set allocations.
2835 BUG_ON(n < adapter->params.nports);
2836 while (n < adapter->sge.ethqsets)
2837 for_each_port(adapter, i) {
2838 pi = adap2pinfo(adapter, i);
2839 if (pi->nqsets > 1) {
2841 adapter->sge.ethqsets--;
2842 if (adapter->sge.ethqsets <= n)
2848 * Reassign the starting Queue Sets for each of the "ports" ...
2851 for_each_port(adapter, i) {
2852 pi = adap2pinfo(adapter, i);
2859 * We need to grab enough MSI-X vectors to cover our interrupt needs. Ideally
2860 * we get a separate MSI-X vector for every "Queue Set" plus any extras we
2861 * need. Minimally we need one for every Virtual Interface plus those needed
2862 * for our "extras". Note that this process may lower the maximum number of
2863 * allowed Queue Sets ...
2865 static int enable_msix(struct adapter *adapter)
2867 int i, want, need, nqsets;
2868 struct msix_entry entries[MSIX_ENTRIES];
2869 struct sge *s = &adapter->sge;
2871 for (i = 0; i < MSIX_ENTRIES; ++i)
2872 entries[i].entry = i;
2875 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
2876 * plus those needed for our "extras" (for example, the firmware
2877 * message queue). We _need_ at least one "Queue Set" per Virtual
2878 * Interface plus those needed for our "extras". So now we get to see
2879 * whether the platform will give us everything we want or only what we need ...
2881 want = s->max_ethqsets + MSIX_EXTRAS;
2882 need = adapter->params.nports + MSIX_EXTRAS;
2884 want = pci_enable_msix_range(adapter->pdev, entries, need, want);
2888 nqsets = want - MSIX_EXTRAS;
2889 if (nqsets < s->max_ethqsets) {
2890 dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
2891 " for %d Queue Sets\n", nqsets);
2892 s->max_ethqsets = nqsets;
2893 if (nqsets < s->ethqsets)
2894 reduce_ethqs(adapter, nqsets);
2896 for (i = 0; i < want; ++i)
2897 adapter->msix_info[i].vec = entries[i].vector;
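	/*
	 * A worked example of the want/need arithmetic above (hypothetical
	 * numbers, with MSIX_EXTRAS of, say, 1 for the firmware event
	 * queue): with max_ethqsets = 16 and 2 ports we request between
	 * need = 3 and want = 17 vectors from pci_enable_msix_range(). If
	 * the platform only grants 9, nqsets becomes 8, max_ethqsets is
	 * trimmed to 8 and, if Queue Sets had already been spread across
	 * the ports, reduce_ethqs() rebalances them.
	 */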
2902 static const struct net_device_ops cxgb4vf_netdev_ops = {
2903 .ndo_open = cxgb4vf_open,
2904 .ndo_stop = cxgb4vf_stop,
2905 .ndo_start_xmit = t4vf_eth_xmit,
2906 .ndo_get_stats = cxgb4vf_get_stats,
2907 .ndo_set_rx_mode = cxgb4vf_set_rxmode,
2908 .ndo_set_mac_address = cxgb4vf_set_mac_addr,
2909 .ndo_validate_addr = eth_validate_addr,
2910 .ndo_do_ioctl = cxgb4vf_do_ioctl,
2911 .ndo_change_mtu = cxgb4vf_change_mtu,
2912 .ndo_fix_features = cxgb4vf_fix_features,
2913 .ndo_set_features = cxgb4vf_set_features,
2914 #ifdef CONFIG_NET_POLL_CONTROLLER
2915 .ndo_poll_controller = cxgb4vf_poll_controller,
2920 * cxgb4vf_get_port_mask - Get port mask for the VF based on mac
2921 * address stored on the adapter
2922 * @adapter: The adapter
2924 * Find the port mask for the VF based on the index of the MAC
2925 * address stored in the adapter. If no mac address is stored on
2926 * the adapter for the VF, use the port mask received from the
2929 static unsigned int cxgb4vf_get_port_mask(struct adapter *adapter)
2931 unsigned int naddr = 1, pidx = 0;
2932 unsigned int pmask, rmask = 0;
2936 pmask = adapter->params.vfres.pmask;
2939 err = t4vf_get_vf_mac_acl(adapter, pidx, &naddr, mac);
2940 if (!err && !is_zero_ether_addr(mac))
2941 rmask |= (1 << pidx);
2947 rmask = adapter->params.vfres.pmask;
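	/*
	 * For example (hypothetical provisioning): if the Port Access Rights
	 * Mask is 0x5 (ports 0 and 2) but the firmware reports a usable MAC
	 * address only for port 0, rmask ends up as 0x1 and we use port 0
	 * alone; if no MAC address is reported for any port, we fall back to
	 * the full provisioned pmask as above.
	 */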
2953 * "Probe" a device: initialize a device and construct all kernel and driver
2954 * state needed to manage the device. This routine is called "init_one" in
2957 static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2958 const struct pci_device_id *ent)
2960 struct adapter *adapter;
2961 struct net_device *netdev;
2962 struct port_info *pi;
2968 * Initialize generic PCI device state.
2970 err = pci_enable_device(pdev);
2972 dev_err(&pdev->dev, "cannot enable PCI device\n");
2977 * Reserve PCI resources for the device. If we can't get them some
2978 * other driver may have already claimed the device ...
2980 err = pci_request_regions(pdev, KBUILD_MODNAME);
2982 dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2983 goto err_disable_device;
2987 * Set up our DMA mask: try for 64-bit address masking first and
2988 * fall back to 32-bit if we can't get 64 bits ...
2990 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2992 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2994 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
2995 " coherent allocations\n");
2996 goto err_release_regions;
3000 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3002 dev_err(&pdev->dev, "no usable DMA configuration\n");
3003 goto err_release_regions;
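	/*
	 * A minimal sketch of the same 64-bit-then-32-bit fallback using the
	 * newer generic DMA API (not what this code does here, just a close
	 * equivalent):
	 *
	 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
	 *		dev_err(&pdev->dev, "no usable DMA configuration\n");
	 *		goto err_release_regions;
	 *	}
	 */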
3009 * Enable bus mastering for the device ...
3011 pci_set_master(pdev);
3014 * Allocate our adapter data structure and attach it to the device.
3016 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3019 goto err_release_regions;
3021 pci_set_drvdata(pdev, adapter);
3022 adapter->pdev = pdev;
3023 adapter->pdev_dev = &pdev->dev;
3025 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
3026 (sizeof(struct mbox_cmd) *
3027 T4VF_OS_LOG_MBOX_CMDS),
3029 if (!adapter->mbox_log) {
3031 goto err_free_adapter;
3033 adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;
3036 * Initialize SMP data synchronization resources.
3038 spin_lock_init(&adapter->stats_lock);
3039 spin_lock_init(&adapter->mbox_lock);
3040 INIT_LIST_HEAD(&adapter->mlist.list);
3043 * Map our I/O registers in BAR0.
3045 adapter->regs = pci_ioremap_bar(pdev, 0);
3046 if (!adapter->regs) {
3047 dev_err(&pdev->dev, "cannot map device registers\n");
3049 goto err_free_adapter;
3052 /* Wait for the device to become ready before proceeding ...
3054 err = t4vf_prep_adapter(adapter);
3056 dev_err(adapter->pdev_dev, "device didn't become ready:"
3058 goto err_unmap_bar0;
3061 /* For T5 and later we want to use the new BAR-based User Doorbells,
3062 * so we need to map BAR2 here ...
3064 if (!is_t4(adapter->params.chip)) {
3065 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
3066 pci_resource_len(pdev, 2));
3067 if (!adapter->bar2) {
3068 dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
3070 goto err_unmap_bar0;
3074 * Initialize adapter level features.
3076 adapter->name = pci_name(pdev);
3077 adapter->msg_enable = DFLT_MSG_ENABLE;
3079 /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
3080 * Ingress Packet Data to Free List Buffers in order to allow for
3081 * chipset performance optimizations between the Root Complex and
3082 * Memory Controllers. (Messages to the associated Ingress Queue
3083 * notifying new Packet Placement in the Free List Buffers will be
3084 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
3085 * all preceding PCIe Transaction Layer Packets will be processed
3086 * first.) But some Root Complexes have various issues with Upstream
3087 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
3088 * PCIe devices under such Root Complexes will have the Relaxed Ordering
3089 * bit cleared in their configuration space, so we check our
3090 * PCIe configuration space to see if it's flagged with advice against
3091 * using Relaxed Ordering.
3093 if (!pcie_relaxed_ordering_enabled(pdev))
3094 adapter->flags |= CXGB4VF_ROOT_NO_RELAXED_ORDERING;
3096 err = adap_init0(adapter);
3099 "Adapter initialization failed, error %d. Continuing in debug mode\n",
3102 /* Initialize hash mac addr list */
3103 INIT_LIST_HEAD(&adapter->mac_hlist);
3106 * Allocate our "adapter ports" and stitch everything together.
3108 pmask = cxgb4vf_get_port_mask(adapter);
3109 for_each_port(adapter, pidx) {
3112 unsigned int naddr = 1;
3115 * We simplistically allocate our virtual interfaces
3116 * sequentially across the port numbers to which we have
3117 * access rights. This should be configurable in some manner
3122 port_id = ffs(pmask) - 1;
3123 pmask &= ~(1 << port_id);
3126 * Allocate our network device and stitch things together.
3128 netdev = alloc_etherdev_mq(sizeof(struct port_info),
3130 if (netdev == NULL) {
3134 adapter->port[pidx] = netdev;
3135 SET_NETDEV_DEV(netdev, &pdev->dev);
3136 pi = netdev_priv(netdev);
3137 pi->adapter = adapter;
3139 pi->port_id = port_id;
3142 * Initialize the starting state of our "port" and register
3145 pi->xact_addr_filt = -1;
3146 netdev->irq = pdev->irq;
3148 netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_GRO |
3149 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3150 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
3151 netdev->features = netdev->hw_features;
3153 netdev->features |= NETIF_F_HIGHDMA;
3154 netdev->vlan_features = netdev->features & VLAN_FEAT;
3156 netdev->priv_flags |= IFF_UNICAST_FLT;
3157 netdev->min_mtu = 81;
3158 netdev->max_mtu = ETH_MAX_MTU;
3160 netdev->netdev_ops = &cxgb4vf_netdev_ops;
3161 netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
3162 netdev->dev_port = pi->port_id;
3165 * If we haven't been able to contact the firmware, there's
3166 * nothing else we can do for this "port" ...
3168 if (!(adapter->flags & CXGB4VF_FW_OK))
3171 viid = t4vf_alloc_vi(adapter, port_id);
3174 "cannot allocate VI for port %d: err=%d\n",
3182 * Initialize the hardware/software state for the port.
3184 err = t4vf_port_init(adapter, pidx);
3186 dev_err(&pdev->dev, "cannot initialize port %d\n",
3191 err = t4vf_get_vf_mac_acl(adapter, port_id, &naddr, mac);
3194 "unable to determine MAC ACL address, "
3195 "continuing anyway.. (status %d)\n", err);
3196 } else if (naddr && adapter->params.vfres.nvi == 1) {
3197 struct sockaddr addr;
3199 ether_addr_copy(addr.sa_data, mac);
3200 err = cxgb4vf_set_mac_addr(netdev, &addr);
3203 "unable to set MAC address %pM\n",
3207 dev_info(&pdev->dev,
3208 "Using assigned MAC ACL: %pM\n", mac);
3212 /* See what interrupts we'll be using. If we've been configured to
3213 * use MSI-X interrupts, try to enable them but fall back to using
3214 * MSI interrupts if we can't enable MSI-X interrupts. If we can't
3215 * get MSI interrupts we bail with the error.
3217 if (msi == MSI_MSIX && enable_msix(adapter) == 0)
3218 adapter->flags |= CXGB4VF_USING_MSIX;
3220 if (msi == MSI_MSIX) {
3221 dev_info(adapter->pdev_dev,
3222 "Unable to use MSI-X Interrupts; falling "
3223 "back to MSI Interrupts\n");
3225 /* We're going to need a Forwarded Interrupt Queue so
3226 * that may cut into how many Queue Sets we can
3230 size_nports_qsets(adapter);
3232 err = pci_enable_msi(pdev);
3234 dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;"
3238 adapter->flags |= CXGB4VF_USING_MSI;
3241 /* Now that we know how many "ports" we have and what interrupt
3242 * mechanism we're going to use, we can configure our queue resources.
3244 cfg_queues(adapter);
3247 * The "card" is now ready to go. If any errors occur during device
3248 * registration we do not fail the whole "card" but rather proceed
3249 * only with the ports we manage to register successfully. However we
3250 * must register at least one net device.
3252 for_each_port(adapter, pidx) {
3253 struct port_info *pi = netdev_priv(adapter->port[pidx]);
3254 netdev = adapter->port[pidx];
3258 netif_set_real_num_tx_queues(netdev, pi->nqsets);
3259 netif_set_real_num_rx_queues(netdev, pi->nqsets);
3261 err = register_netdev(netdev);
3263 dev_warn(&pdev->dev, "cannot register net device %s,"
3264 " skipping\n", netdev->name);
3268 netif_carrier_off(netdev);
3269 set_bit(pidx, &adapter->registered_device_map);
3271 if (adapter->registered_device_map == 0) {
3272 dev_err(&pdev->dev, "could not register any net devices\n");
3273 goto err_disable_interrupts;
3277 * Set up our debugfs entries.
3279 if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
3280 adapter->debugfs_root =
3281 debugfs_create_dir(pci_name(pdev),
3282 cxgb4vf_debugfs_root);
3283 setup_debugfs(adapter);
3287 * Print a short notice on the existence and configuration of the new
3288 * VF network device ...
3290 for_each_port(adapter, pidx) {
3291 dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
3292 adapter->port[pidx]->name,
3293 (adapter->flags & CXGB4VF_USING_MSIX) ? "MSI-X" :
3294 (adapter->flags & CXGB4VF_USING_MSI) ? "MSI" : "");
3303 * Error recovery and exit code. Unwind state that's been created
3304 * so far and return the error.
3306 err_disable_interrupts:
3307 if (adapter->flags & CXGB4VF_USING_MSIX) {
3308 pci_disable_msix(adapter->pdev);
3309 adapter->flags &= ~CXGB4VF_USING_MSIX;
3310 } else if (adapter->flags & CXGB4VF_USING_MSI) {
3311 pci_disable_msi(adapter->pdev);
3312 adapter->flags &= ~CXGB4VF_USING_MSI;
3316 for_each_port(adapter, pidx) {
3317 netdev = adapter->port[pidx];
3320 pi = netdev_priv(netdev);
3322 t4vf_free_vi(adapter, pi->viid);
3323 if (test_bit(pidx, &adapter->registered_device_map))
3324 unregister_netdev(netdev);
3325 free_netdev(netdev);
3328 if (!is_t4(adapter->params.chip))
3329 iounmap(adapter->bar2);
3332 iounmap(adapter->regs);
3335 kfree(adapter->mbox_log);
3338 err_release_regions:
3339 pci_release_regions(pdev);
3340 pci_clear_master(pdev);
3343 pci_disable_device(pdev);
3349 * "Remove" a device: tear down all kernel and driver state created in the
3350 * "probe" routine and quiesce the device (disable interrupts, etc.). (Note
3351 * that this is called "remove_one" in the PF Driver.)
3353 static void cxgb4vf_pci_remove(struct pci_dev *pdev)
3355 struct adapter *adapter = pci_get_drvdata(pdev);
3356 struct hash_mac_addr *entry, *tmp;
3359 * Tear down driver state associated with device.
3365 * Stop all of our activity. Unregister network port,
3366 * disable interrupts, etc.
3368 for_each_port(adapter, pidx)
3369 if (test_bit(pidx, &adapter->registered_device_map))
3370 unregister_netdev(adapter->port[pidx]);
3371 t4vf_sge_stop(adapter);
3372 if (adapter->flags & CXGB4VF_USING_MSIX) {
3373 pci_disable_msix(adapter->pdev);
3374 adapter->flags &= ~CXGB4VF_USING_MSIX;
3375 } else if (adapter->flags & CXGB4VF_USING_MSI) {
3376 pci_disable_msi(adapter->pdev);
3377 adapter->flags &= ~CXGB4VF_USING_MSI;
3381 * Tear down our debugfs entries.
3383 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
3384 cleanup_debugfs(adapter);
3385 debugfs_remove_recursive(adapter->debugfs_root);
3389 * Free all of the various resources which we've acquired ...
3391 t4vf_free_sge_resources(adapter);
3392 for_each_port(adapter, pidx) {
3393 struct net_device *netdev = adapter->port[pidx];
3394 struct port_info *pi;
3399 pi = netdev_priv(netdev);
3401 t4vf_free_vi(adapter, pi->viid);
3402 free_netdev(netdev);
3404 iounmap(adapter->regs);
3405 if (!is_t4(adapter->params.chip))
3406 iounmap(adapter->bar2);
3407 kfree(adapter->mbox_log);
3408 list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
3410 list_del(&entry->list);
3417 * Disable the device and release its PCI resources.
3419 pci_disable_device(pdev);
3420 pci_clear_master(pdev);
3421 pci_release_regions(pdev);
3425 * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
3428 static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
3430 struct adapter *adapter;
3433 adapter = pci_get_drvdata(pdev);
3437 /* Disable all Virtual Interfaces. This will shut down the
3438 * delivery of all ingress packets into the chip for these
3439 * Virtual Interfaces.
3441 for_each_port(adapter, pidx)
3442 if (test_bit(pidx, &adapter->registered_device_map))
3443 unregister_netdev(adapter->port[pidx]);
3445 /* Free up all Queues which will prevent further DMA and
3446 * Interrupts allowing various internal pathways to drain.
3448 t4vf_sge_stop(adapter);
3449 if (adapter->flags & CXGB4VF_USING_MSIX) {
3450 pci_disable_msix(adapter->pdev);
3451 adapter->flags &= ~CXGB4VF_USING_MSIX;
3452 } else if (adapter->flags & CXGB4VF_USING_MSI) {
3453 pci_disable_msi(adapter->pdev);
3454 adapter->flags &= ~CXGB4VF_USING_MSI;
3458 * Now that Interrupts and DMA have been stopped, free up all of our
3459 * Queue resources.
3461 t4vf_free_sge_resources(adapter);
3462 pci_set_drvdata(pdev, NULL);
3465 /* Macros needed to support the PCI Device ID Table ...
3467 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
3468 static const struct pci_device_id cxgb4vf_pci_tbl[] = {
3469 #define CH_PCI_DEVICE_ID_FUNCTION 0x8
3471 #define CH_PCI_ID_TABLE_ENTRY(devid) \
3472 { PCI_VDEVICE(CHELSIO, (devid)), 0 }
3474 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
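/*
 * For illustration only: with the macros above, an entry such as
 * CH_PCI_ID_TABLE_ENTRY(0x5800) (a hypothetical device ID) expands to
 * { PCI_VDEVICE(CHELSIO, (0x5800)), 0 }, and including t4_pci_id_tbl.h below
 * expands into the full cxgb4vf_pci_tbl[] array terminated by the { 0, }
 * sentinel. CH_PCI_DEVICE_ID_FUNCTION (0x8) is used by that header to select
 * the Virtual Function variants of each device ID.
 */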
3476 #include "../cxgb4/t4_pci_id_tbl.h"
3478 MODULE_DESCRIPTION(DRV_DESC);
3479 MODULE_AUTHOR("Chelsio Communications");
3480 MODULE_LICENSE("Dual BSD/GPL");
3481 MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
3483 static struct pci_driver cxgb4vf_driver = {
3484 .name = KBUILD_MODNAME,
3485 .id_table = cxgb4vf_pci_tbl,
3486 .probe = cxgb4vf_pci_probe,
3487 .remove = cxgb4vf_pci_remove,
3488 .shutdown = cxgb4vf_pci_shutdown,
3492 * Initialize global driver state.
3494 static int __init cxgb4vf_module_init(void)
3499 * Vet our module parameters.
3501 if (msi != MSI_MSIX && msi != MSI_MSI) {
3502 pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
3503 msi, MSI_MSIX, MSI_MSI);
3507 /* Debugfs support is optional, debugfs will warn if this fails */
3508 cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3510 ret = pci_register_driver(&cxgb4vf_driver);
3512 debugfs_remove(cxgb4vf_debugfs_root);
3517 * Tear down global driver state.
3519 static void __exit cxgb4vf_module_exit(void)
3521 pci_unregister_driver(&cxgb4vf_driver);
3522 debugfs_remove(cxgb4vf_debugfs_root);
3525 module_init(cxgb4vf_module_init);
3526 module_exit(cxgb4vf_module_exit);