/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * free up resources.
 */
#define MAX_SGE_TIMERVAL 200U
enum {
	/*
	 * Physical Function provisioning constants.
	 */
	PFRES_NVI = 4,			/* # of Virtual Interfaces */
	PFRES_NETHCTRL = 128,		/* # of EQs used for ETH or CTRL Qs */
	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr */
	PFRES_NEQ = 256,		/* # of egress queues */
	PFRES_NIQ = 0,			/* # of ingress queues */
	PFRES_TC = 0,			/* PCI-E traffic class */
	PFRES_NEXACTF = 128,		/* # of exact MPS filters */

	PFRES_R_CAPS = FW_CMD_CAP_PF,
	PFRES_WX_CAPS = FW_CMD_CAP_PF,

	/*
	 * Virtual Function provisioning constants.  We need two extra Ingress
	 * Queues with Interrupt capability to serve as the VF's Firmware
	 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
	 * neither will have Free Lists associated with it.  For each
	 * Ethernet/Control Egress Queue and for each Free List, we need an
	 * Egress Context.
	 */
	VFRES_NPORTS = 1,		/* # of "ports" per VF */
	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */

	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
	VFRES_TC = 0,			/* PCI-E traffic class */
	VFRES_NEXACTF = 16,		/* # of exact MPS filters */

	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
};
/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
				  unsigned int pf, unsigned int vf)
{
	unsigned int portn, portvec;

	/*
	 * Give PFs access to all of the ports.
	 */
	if (vf == 0)
		return FW_PFVF_CMD_PMASK_MASK;

	/*
	 * For VFs, we'll assign them access to the ports based purely on the
	 * PF.  We assign active ports in order, wrapping around if there are
	 * fewer active ports than PFs: e.g. active port[pf % nports].
	 * Unfortunately the adapter's port_info structs haven't been
	 * initialized yet so we have to compute this.
	 */
	if (adapter->params.nports == 0)
		return 0;

	portn = pf % adapter->params.nports;
	portvec = adapter->params.portvec;
	while (1) {
		/*
		 * Isolate the lowest set bit in the port vector.  If we're at
		 * the port number that we want, return that as the pmask;
		 * otherwise mask that bit out of the port vector and
		 * decrement our port number ...
		 */
		unsigned int pmask = portvec ^ (portvec & (portvec - 1));

		if (portn == 0)
			return pmask;
		portn--;
		portvec &= ~pmask;
	}
}
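/*
 * Worked example of the bit-isolation step above (illustrative only):
 * portvec ^ (portvec & (portvec - 1)) clears all but the lowest set bit.
 * For portvec = 0xa, portvec - 1 = 0x9, the AND gives 0x8 and the XOR
 * leaves 0x2.  So with portvec = 0xa and portn = 1 the loop first isolates
 * 0x2, decrements portn to 0, masks 0x2 out, and then returns 0x8 -- the
 * mask for the second active port (port 3).
 */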
enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
	MIN_FL_ENTRIES       = 16
};
/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
	CH_DEVICE(0xa000, 0),  /* PE10K */
	CH_DEVICE(0x4001, -1),
	CH_DEVICE(0x4002, -1),
	CH_DEVICE(0x4003, -1),
	CH_DEVICE(0x4004, -1),
	CH_DEVICE(0x4005, -1),
	CH_DEVICE(0x4006, -1),
	CH_DEVICE(0x4007, -1),
	CH_DEVICE(0x4008, -1),
	CH_DEVICE(0x4009, -1),
	CH_DEVICE(0x400a, -1),
	CH_DEVICE(0x4401, 4),
	CH_DEVICE(0x4402, 4),
	CH_DEVICE(0x4403, 4),
	CH_DEVICE(0x4404, 4),
	CH_DEVICE(0x4405, 4),
	CH_DEVICE(0x4406, 4),
	CH_DEVICE(0x4407, 4),
	CH_DEVICE(0x4408, 4),
	CH_DEVICE(0x4409, 4),
	CH_DEVICE(0x440a, 4),
	CH_DEVICE(0x440d, 4),
	CH_DEVICE(0x440e, 4),
	CH_DEVICE(0x5001, 4),
	CH_DEVICE(0x5002, 4),
	CH_DEVICE(0x5003, 4),
	CH_DEVICE(0x5004, 4),
	CH_DEVICE(0x5005, 4),
	CH_DEVICE(0x5006, 4),
	CH_DEVICE(0x5007, 4),
	CH_DEVICE(0x5008, 4),
	CH_DEVICE(0x5009, 4),
	CH_DEVICE(0x500A, 4),
	CH_DEVICE(0x500B, 4),
	CH_DEVICE(0x500C, 4),
	CH_DEVICE(0x500D, 4),
	CH_DEVICE(0x500E, 4),
	CH_DEVICE(0x500F, 4),
	CH_DEVICE(0x5010, 4),
	CH_DEVICE(0x5011, 4),
	CH_DEVICE(0x5012, 4),
	CH_DEVICE(0x5013, 4),
	CH_DEVICE(0x5401, 4),
	CH_DEVICE(0x5402, 4),
	CH_DEVICE(0x5403, 4),
	CH_DEVICE(0x5404, 4),
	CH_DEVICE(0x5405, 4),
	CH_DEVICE(0x5406, 4),
	CH_DEVICE(0x5407, 4),
	CH_DEVICE(0x5408, 4),
	CH_DEVICE(0x5409, 4),
	CH_DEVICE(0x540A, 4),
	CH_DEVICE(0x540B, 4),
	CH_DEVICE(0x540C, 4),
	CH_DEVICE(0x540D, 4),
	CH_DEVICE(0x540E, 4),
	CH_DEVICE(0x540F, 4),
	CH_DEVICE(0x5410, 4),
	CH_DEVICE(0x5411, 4),
	CH_DEVICE(0x5412, 4),
	CH_DEVICE(0x5413, 4),
	{ 0, }
};
#define FW_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters");
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it, like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
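/*
 * Worked example (illustrative only): the Ethernet header is 14 bytes, so
 * a packet DMA'ed to offset 0 puts the IP header at byte 14, which is not
 * 4-byte aligned.  With rx_dma_offset = 2 the IP header starts at byte 16,
 * a 4-byte boundary, so the 4-byte IP header fields can be loaded directly.
 */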
static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif
/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
enum {
	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
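/*
 * Budget check for the default selection above (illustrative arithmetic):
 * 1 (IP Fragment) + 3 (MPS Match Type) + 8 (IP Protocol) + 17 ([Inner] VLAN)
 * + 3 (Port) + 1 (FCoE) = 33 bits, which fits within the 36-bit compressed
 * filter budget with 3 bits to spare.
 */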
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		link_report(dev);
	}
}
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0, uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}

static struct workqueue_struct *workq;
/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
	return ret;
}
/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}
/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = GET_TCB_COOKIE(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
	if (v) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);
}
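/*
 * Example of the resulting vector naming (illustrative, assuming the first
 * port is named eth0 and has two queue sets): vector 0 is "eth0" (non-data
 * interrupts), vector 1 is "eth0-FWeventq", vectors 2 and 3 are "eth0-Rx0"
 * and "eth0-Rx1", followed by any "eth0-ofld%d" and "eth0-rdma%d" vectors.
 */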
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}
/**
 *	write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = q[*queues].rspq.abs_id;

	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	kfree(rss);
	return err;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		err = write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     SEINTARM(q->intr_params) |
			     INGRESSQID(q->cntxt_id));
	}
}
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
	return 0;
}
/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if flash load couldn't start.
 */
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	u32 vers, exp_major;
	const struct fw_hdr *hdr;
	const struct firmware *fw;
	struct device *dev = adap->pdev_dev;
	char *fw_file_name;

	switch (CHELSIO_CHIP_VERSION(adap->chip)) {
	case CHELSIO_T4:
		fw_file_name = FW_FNAME;
		exp_major = FW_VERSION_MAJOR;
		break;
	case CHELSIO_T5:
		fw_file_name = FW5_FNAME;
		exp_major = FW_VERSION_MAJOR_T5;
		break;
	default:
		dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
		return -EINVAL;
	}

	ret = request_firmware(&fw, fw_file_name, dev);
	if (ret < 0) {
		dev_err(dev, "unable to load firmware image %s, error %d\n",
			fw_file_name, ret);
		return ret;
	}

	hdr = (const struct fw_hdr *)fw->data;
	vers = ntohl(hdr->fw_ver);
	if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
		ret = -EINVAL;              /* wrong major version, won't do */
		goto out;
	}

	/*
	 * If the flash FW is unusable or we found something newer, load it.
	 */
	if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
	    vers > adap->params.fw_vers) {
		dev_info(dev, "upgrading firmware ...\n");
		ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
				    /*force=*/false);
		if (!ret)
			dev_info(dev,
				 "firmware upgraded to version %pI4 from %s\n",
				 &hdr->fw_ver, fw_file_name);
		else
			dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
	} else {
		/*
		 * Tell our caller that we didn't upgrade the firmware.
		 */
		ret = -EINVAL;
	}

out:	release_firmware(fw);
	return ret;
}
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through alloc_mem().
 */
static void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
		if (f->l2t == NULL)
			return -EAGAIN;
		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
					f->fs.eport, f->fs.dmac)) {
			cxgb4_l2t_release(f->l2t);
			f->l2t = NULL;
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(V_FW_FILTER_WR_TID(ftid) |
		      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		      V_FW_FILTER_WR_NOREPLY(0) |
		      V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		      V_FW_FILTER_WR_PRIO(f->fs.prio) |
		      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(V_FW_FILTER_WR_RX_CHAN(0) |
		      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}
/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}
static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxBroadcastFrames  ",
	"TxMulticastFrames  ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",
	"RxBroadcastFrames  ",
	"RxMulticastFrames  ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",
	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc   ",
	"RxBG1FramesTrunc   ",
	"RxBG2FramesTrunc   ",
	"RxBG3FramesTrunc   ",
	"WriteCoalSuccess   ",
	"WriteCoalFail      ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}
#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	if (is_t4(adap->chip))
		return T4_REGMAP_SIZE;
	else
		return T5_REGMAP_SIZE;
}
static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%u.%u.%u.%u, TP %u.%u.%u.%u",
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}
/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
		const struct port_info *p, struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 val1, val2;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
	data += sizeof(struct queue_port_stats) / sizeof(u64);
	if (!is_t4(adapter->chip)) {
		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
		*data = val1 - val2;
		data++;
		*data = val2;
	} else {
		memset(data, 0, 2 * sizeof(u64));
	}
}
/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return CHELSIO_CHIP_VERSION(ap->chip) |
		(CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
}
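/*
 * For instance (illustrative arithmetic only): a chip version of 4 with
 * revision 2 and register dump version 1 yields
 * 4 | (2 << 10) | (1 << 16) = 0x10804.
 */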
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
			   unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	static const unsigned int t4_reg_ranges[] = {
		/* (T4 register address ranges elided from this excerpt) */
	};

	static const unsigned int t5_reg_ranges[] = {
		/* (T5 register address ranges elided from this excerpt) */
	};

	int i;
	struct adapter *ap = netdev2adap(dev);
	static const unsigned int *reg_ranges;
	int arr_size = 0, buf_size = 0;

	if (is_t4(ap->chip)) {
		reg_ranges = &t4_reg_ranges[0];
		arr_size = ARRAY_SIZE(t4_reg_ranges);
		buf_size = T4_REGMAP_SIZE;
	} else {
		reg_ranges = &t5_reg_ranges[0];
		arr_size = ARRAY_SIZE(t5_reg_ranges);
		buf_size = T5_REGMAP_SIZE;
	}

	regs->version = mk_adap_vers(ap);

	memset(buf, 0, buf_size);
	for (i = 0; i < arr_size; i += 2)
		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
	return 0;
}
static int identify_port(struct net_device *dev,
			 enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct adapter *adap = netdev2adap(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}
static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
	unsigned int v = 0;

	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
	    type == FW_PORT_TYPE_BT_XAUI) {
		v |= SUPPORTED_TP;
		if (caps & FW_PORT_CAP_SPEED_100M)
			v |= SUPPORTED_100baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
		v |= SUPPORTED_Backplane;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseKX_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseKX4_Full;
	} else if (type == FW_PORT_TYPE_KR)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
	else if (type == FW_PORT_TYPE_BP_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
	else if (type == FW_PORT_TYPE_BP4_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
		     SUPPORTED_10000baseKX4_Full;
	else if (type == FW_PORT_TYPE_FIBER_XFI ||
		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
		v |= SUPPORTED_FIBRE;

	if (caps & FW_PORT_CAP_ANEG)
		v |= SUPPORTED_Autoneg;
	return v;
}
static unsigned int to_fw_linkcaps(unsigned int caps)
{
	unsigned int v = 0;

	if (caps & ADVERTISED_100baseT_Full)
		v |= FW_PORT_CAP_SPEED_100M;
	if (caps & ADVERTISED_1000baseT_Full)
		v |= FW_PORT_CAP_SPEED_1G;
	if (caps & ADVERTISED_10000baseT_Full)
		v |= FW_PORT_CAP_SPEED_10G;
	return v;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	const struct port_info *p = netdev_priv(dev);

	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XFI ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI)
		cmd->port = PORT_TP;
	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
		cmd->port = PORT_FIBRE;
	else if (p->port_type == FW_PORT_TYPE_SFP) {
		if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
		    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			cmd->port = PORT_DA;
		else
			cmd->port = PORT_FIBRE;
	} else
		cmd->port = PORT_OTHER;

	if (p->mdio_addr >= 0) {
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0;  /* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
	cmd->advertising = from_fw_linkcaps(p->port_type,
					    p->link_cfg.advertising);
	ethtool_cmd_speed_set(cmd,
			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
	cmd->duplex = DUPLEX_FULL;
	cmd->autoneg = p->link_cfg.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static unsigned int speed_to_caps(int speed)
{
	if (speed == SPEED_100)
		return FW_PORT_CAP_SPEED_100M;
	if (speed == SPEED_1000)
		return FW_PORT_CAP_SPEED_1G;
	if (speed == SPEED_10000)
		return FW_PORT_CAP_SPEED_10G;
	return 0;
}
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	unsigned int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/*
		 * PHY offers a single speed.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(speed)))
			return 0;
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(speed);

		if (!(lc->supported & cap) || (speed == SPEED_1000) ||
		    (speed == SPEED_10000))
			return -EINVAL;
		lc->requested_speed = cap;
		lc->advertising = 0;
	} else {
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = 0;
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = cmd->autoneg;

	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & FW_PORT_CAP_ANEG)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (i = 0; i < pi->nqsets; ++i) {
		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
	}
	return 0;
}
static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
			       const struct sge_rspq *q)
{
	unsigned int idx = q->intr_params >> 1;

	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}
/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adap: the adapter
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
					    &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
	return 0;
}
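/*
 * Illustrative usage (not from the original source): a call such as
 * set_rxq_intr_params(adap, q, 50, 8) rounds 50 us to the nearest entry in
 * the adapter's timer table and 8 packets to the nearest counter threshold,
 * then arms both so the queue interrupts on whichever triggers first.
 */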
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_rspq *q;
	int i;
	int r = 0;

	for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
		q = &adap->sge.ethrxq[i].rspq;
		r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
			c->rx_max_coalesced_frames);
		if (r) {
			dev_err(&dev->dev, "failed to set coalesce %d\n", r);
			break;
		}
	}
	return r;
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;
	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

	c->rx_coalesce_usecs = qtimer_val(adap, rq);
	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
		adap->sge.counter_val[rq->pktcnt_idx] : 0;
	return 0;
}
/**
 *	eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [31K-A..31K)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
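/*
 * Worked example of the mapping (illustrative only), writing A = fn * sz:
 * physical address 0 maps to virtual 31744 (31K); physical 1024 maps to
 * 31744 - A, the bottom of the function-specific window; and physical
 * 1024 + A maps to virtual 0.
 */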
/*
 * The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
	return vaddr < 0 ? vaddr : 0;
}
#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = netdev2adap(dev);

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->fn > 0) {
		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * RMW possibly needed for first or last words.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
	int ret;
	const struct firmware *fw;
	struct adapter *adap = netdev2adap(netdev);

	ef->data[sizeof(ef->data) - 1] = '\0';
	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
	if (ret < 0)
		return ret;

	ret = t4_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);
	if (!ret)
		dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
	return ret;
}
#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
#define BCAST_CRC 0xa0ccc1a6

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = netdev2adap(dev)->wol;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int err = 0;
	struct port_info *pi = netdev_priv(dev);

	if (wol->wolopts & ~WOL_SUPPORTED)
		return -EINVAL;
	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
	if (wol->wolopts & WAKE_BCAST) {
		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
					~0ULL, 0, false);
		if (!err)
			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
						~6ULL, ~0ULL, BCAST_CRC, true);
	} else
		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
	return err;
}
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}
static u32 get_rss_table_size(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return pi->rss_size;
}

static int get_rss_table(struct net_device *dev, u32 *p)
{
	const struct port_info *pi = netdev_priv(dev);
	unsigned int n = pi->rss_size;

	while (n--)
		p[n] = pi->rss[n];
	return 0;
}

static int set_rss_table(struct net_device *dev, const u32 *p)
{
	unsigned int i;
	struct port_info *pi = netdev_priv(dev);

	for (i = 0; i < pi->rss_size; i++)
		pi->rss[i] = p[i];
	if (pi->adapter->flags & FULL_INIT_DONE)
		return write_rss(pi, pi->rss);
	return 0;
}
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     u32 *rules)
{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		unsigned int v = pi->rss_mode;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	}
	return -EOPNOTSUPP;
}
2850 static const struct ethtool_ops cxgb_ethtool_ops = {
2851 .get_settings = get_settings,
2852 .set_settings = set_settings,
2853 .get_drvinfo = get_drvinfo,
2854 .get_msglevel = get_msglevel,
2855 .set_msglevel = set_msglevel,
2856 .get_ringparam = get_sge_param,
2857 .set_ringparam = set_sge_param,
2858 .get_coalesce = get_coalesce,
2859 .set_coalesce = set_coalesce,
2860 .get_eeprom_len = get_eeprom_len,
2861 .get_eeprom = get_eeprom,
2862 .set_eeprom = set_eeprom,
2863 .get_pauseparam = get_pauseparam,
2864 .set_pauseparam = set_pauseparam,
2865 .get_link = ethtool_op_get_link,
2866 .get_strings = get_strings,
2867 .set_phys_id = identify_port,
2868 .nway_reset = restart_autoneg,
2869 .get_sset_count = get_sset_count,
2870 .get_ethtool_stats = get_stats,
2871 .get_regs_len = get_regs_len,
2872 .get_regs = get_regs,
2875 .get_rxnfc = get_rxnfc,
2876 .get_rxfh_indir_size = get_rss_table_size,
2877 .get_rxfh_indir = get_rss_table,
2878 .set_rxfh_indir = set_rss_table,
2879 .flash_device = set_flash,
2885 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2889 loff_t avail = file_inode(file)->i_size;
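	/*
	 * file->private_data holds the adapter pointer tagged with a
	 * 2-bit memory index in its low bits, as set up by
	 * add_debugfs_mem() below; the next two lines split the tag
	 * from the pointer.
	 */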
2890 unsigned int mem = (uintptr_t)file->private_data & 3;
2891 struct adapter *adap = file->private_data - mem;
2897 if (count > avail - pos)
2898 count = avail - pos;
2905 if ((mem == MEM_MC) || (mem == MEM_MC1))
2906 ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
2908 ret = t4_edc_read(adap, mem, pos, data, NULL);
2912 ofst = pos % sizeof(data);
2913 len = min(count, sizeof(data) - ofst);
2914 if (copy_to_user(buf, (u8 *)data + ofst, len))
2921 count = pos - *ppos;
2926 static const struct file_operations mem_debugfs_fops = {
2927 .owner = THIS_MODULE,
2928 .open = simple_open,
2930 .llseek = default_llseek,
2933 static void add_debugfs_mem(struct adapter *adap, const char *name,
2934 unsigned int idx, unsigned int size_mb)
2938 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2939 (void *)adap + idx, &mem_debugfs_fops);
2940 if (de && de->d_inode)
2941 de->d_inode->i_size = size_mb << 20;
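	/*
	 * Stashing the region size in i_size lets mem_read() above bound
	 * its reads without a separate per-file size field.
	 */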
2944 static int setup_debugfs(struct adapter *adap)
2949 if (IS_ERR_OR_NULL(adap->debugfs_root))
2952 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2953 if (i & EDRAM0_ENABLE) {
2954 size = t4_read_reg(adap, MA_EDRAM0_BAR);
2955 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
2957 if (i & EDRAM1_ENABLE) {
2958 size = t4_read_reg(adap, MA_EDRAM1_BAR);
2959 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2961 if (is_t4(adap->chip)) {
2962 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2963 if (i & EXT_MEM_ENABLE)
2964 add_debugfs_mem(adap, "mc", MEM_MC,
2965 EXT_MEM_SIZE_GET(size));
2967 if (i & EXT_MEM_ENABLE) {
2968 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2969 add_debugfs_mem(adap, "mc0", MEM_MC0,
2970 EXT_MEM_SIZE_GET(size));
2972 if (i & EXT_MEM1_ENABLE) {
2973 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2974 add_debugfs_mem(adap, "mc1", MEM_MC1,
2975 EXT_MEM_SIZE_GET(size));
2979 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2985 * upper-layer driver support
2989 * Allocate an active-open TID and set it to the supplied value.
2991 int cxgb4_alloc_atid(struct tid_info *t, void *data)
2995 spin_lock_bh(&t->atid_lock);
2997 union aopen_entry *p = t->afree;
2999 atid = (p - t->atid_tab) + t->atid_base;
3004 spin_unlock_bh(&t->atid_lock);
3007 EXPORT_SYMBOL(cxgb4_alloc_atid);
3010 * Release an active-open TID.
3012 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3014 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
3016 spin_lock_bh(&t->atid_lock);
3020 spin_unlock_bh(&t->atid_lock);
3022 EXPORT_SYMBOL(cxgb4_free_atid);
3025 * Allocate a server TID and set it to the supplied value.
3027 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3031 spin_lock_bh(&t->stid_lock);
3032 if (family == PF_INET) {
3033 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3034 if (stid < t->nstids)
3035 __set_bit(stid, t->stid_bmap);
3039 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3044 t->stid_tab[stid].data = data;
3045 stid += t->stid_base;
3048 spin_unlock_bh(&t->stid_lock);
3051 EXPORT_SYMBOL(cxgb4_alloc_stid);
3053 /* Allocate a server filter TID and set it to the supplied value.
3055 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3059 spin_lock_bh(&t->stid_lock);
3060 if (family == PF_INET) {
3061 stid = find_next_zero_bit(t->stid_bmap,
3062 t->nstids + t->nsftids, t->nstids);
3063 if (stid < (t->nstids + t->nsftids))
3064 __set_bit(stid, t->stid_bmap);
3071 t->stid_tab[stid].data = data;
3072 stid += t->stid_base;
3075 spin_unlock_bh(&t->stid_lock);
3078 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3080 /* Release a server TID.
3082 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3084 stid -= t->stid_base;
3085 spin_lock_bh(&t->stid_lock);
3086 if (family == PF_INET)
3087 __clear_bit(stid, t->stid_bmap);
3089 bitmap_release_region(t->stid_bmap, stid, 2);
3090 t->stid_tab[stid].data = NULL;
3092 spin_unlock_bh(&t->stid_lock);
3094 EXPORT_SYMBOL(cxgb4_free_stid);
3097 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3099 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3102 struct cpl_tid_release *req;
3104 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3105 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3106 INIT_TP_WR(req, tid);
3107 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3111 * Queue a TID release request and, if necessary, schedule a work queue to process it.
3114 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3117 void **p = &t->tid_tab[tid];
3118 struct adapter *adap = container_of(t, struct adapter, tids);
3120 spin_lock_bh(&adap->tid_release_lock);
3121 *p = adap->tid_release_head;
3122 /* Low 2 bits encode the Tx channel number */
3123 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3124 if (!adap->tid_release_task_busy) {
3125 adap->tid_release_task_busy = true;
3126 queue_work(workq, &adap->tid_release_task);
3128 spin_unlock_bh(&adap->tid_release_lock);
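/*
 * Illustrative sketch (not from the original source): decoding one entry
 * of the deferred-release list built above.  Entries are linked through
 * the tid_tab slots themselves, and because those slots are
 * pointer-aligned the low two bits are free to carry the Tx channel.
 */
static inline void **tid_release_decode(void *tagged, unsigned int *chan)
{
	*chan = (uintptr_t)tagged & 3;		/* low 2 bits = Tx channel */
	return (void **)(tagged - *chan);	/* strip the tag */
}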
3132 * Process the list of pending TID release requests.
3134 static void process_tid_release_list(struct work_struct *work)
3136 struct sk_buff *skb;
3137 struct adapter *adap;
3139 adap = container_of(work, struct adapter, tid_release_task);
3141 spin_lock_bh(&adap->tid_release_lock);
3142 while (adap->tid_release_head) {
3143 void **p = adap->tid_release_head;
3144 unsigned int chan = (uintptr_t)p & 3;
3145 p = (void *)p - chan;
3147 adap->tid_release_head = *p;
3149 spin_unlock_bh(&adap->tid_release_lock);
3151 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3153 schedule_timeout_uninterruptible(1);
3155 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3156 t4_ofld_send(adap, skb);
3157 spin_lock_bh(&adap->tid_release_lock);
3159 adap->tid_release_task_busy = false;
3160 spin_unlock_bh(&adap->tid_release_lock);
3164 * Release a TID and inform HW. If we are unable to allocate the release
3165 * message we defer to a work queue.
3167 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3170 struct sk_buff *skb;
3171 struct adapter *adap = container_of(t, struct adapter, tids);
3173 old = t->tid_tab[tid];
3174 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3176 t->tid_tab[tid] = NULL;
3177 mk_tid_release(skb, chan, tid);
3178 t4_ofld_send(adap, skb);
3180 cxgb4_queue_tid_release(t, chan, tid);
3182 atomic_dec(&t->tids_in_use);
3184 EXPORT_SYMBOL(cxgb4_remove_tid);
3187 * Allocate and initialize the TID tables. Returns 0 on success.
3189 static int tid_init(struct tid_info *t)
3192 unsigned int stid_bmap_size;
3193 unsigned int natids = t->natids;
3195 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3196 size = t->ntids * sizeof(*t->tid_tab) +
3197 natids * sizeof(*t->atid_tab) +
3198 t->nstids * sizeof(*t->stid_tab) +
3199 t->nsftids * sizeof(*t->stid_tab) +
3200 stid_bmap_size * sizeof(long) +
3201 t->nftids * sizeof(*t->ftid_tab) +
3202 t->nsftids * sizeof(*t->ftid_tab);
3204 t->tid_tab = t4_alloc_mem(size);
3208 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3209 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3210 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3211 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3212 spin_lock_init(&t->stid_lock);
3213 spin_lock_init(&t->atid_lock);
3215 t->stids_in_use = 0;
3217 t->atids_in_use = 0;
3218 atomic_set(&t->tids_in_use, 0);
3220 /* Setup the free list for atid_tab and clear the stid bitmap. */
3223 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3224 t->afree = t->atid_tab;
3226 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3231 * cxgb4_create_server - create an IP server
3233 * @stid: the server TID
3234 * @sip: local IP address to bind server to
3235 * @sport: the server's TCP port
3236 * @queue: queue to direct messages from this server to
3238 * Create an IP server for the given port and address.
3239 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3241 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3242 __be32 sip, __be16 sport, __be16 vlan,
3246 struct sk_buff *skb;
3247 struct adapter *adap;
3248 struct cpl_pass_open_req *req;
3250 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3254 adap = netdev2adap(dev);
3255 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3257 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3258 req->local_port = sport;
3259 req->peer_port = htons(0);
3260 req->local_ip = sip;
3261 req->peer_ip = htonl(0);
3262 chan = rxq_to_chan(&adap->sge, queue);
3263 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3264 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3265 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3266 return t4_mgmt_tx(adap, skb);
3268 EXPORT_SYMBOL(cxgb4_create_server);
3271 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3272 * @mtus: the HW MTU table
3273 * @mtu: the target MTU
3274 * @idx: index of selected entry in the MTU table
3276 * Returns the index and the value in the HW MTU table that is closest to
3277 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3278 * table, in which case that smallest available value is selected.
3280 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3285 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3291 EXPORT_SYMBOL(cxgb4_best_mtu);
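/*
 * Usage sketch (illustrative only, not from the original source): pick
 * the hardware MTU entry for a 1500-byte path MTU; "adap" is assumed to
 * be a valid adapter with a loaded MTU table.
 *
 *	unsigned int idx;
 *	unsigned int hw_mtu = cxgb4_best_mtu(adap->params.mtus, 1500, &idx);
 *
 * hw_mtu <= 1500 unless every table entry exceeds 1500 bytes.
 */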
3294 * cxgb4_port_chan - get the HW channel of a port
3295 * @dev: the net device for the port
3297 * Return the HW Tx channel of the given port.
3299 unsigned int cxgb4_port_chan(const struct net_device *dev)
3301 return netdev2pinfo(dev)->tx_chan;
3303 EXPORT_SYMBOL(cxgb4_port_chan);
3305 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3307 struct adapter *adap = netdev2adap(dev);
3308 u32 v1, v2, lp_count, hp_count;
3310 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3311 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3312 if (is_t4(adap->chip)) {
3313 lp_count = G_LP_COUNT(v1);
3314 hp_count = G_HP_COUNT(v1);
3316 lp_count = G_LP_COUNT_T5(v1);
3317 hp_count = G_HP_COUNT_T5(v2);
3319 return lpfifo ? lp_count : hp_count;
3321 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3324 * cxgb4_port_viid - get the VI id of a port
3325 * @dev: the net device for the port
3327 * Return the VI id of the given port.
3329 unsigned int cxgb4_port_viid(const struct net_device *dev)
3331 return netdev2pinfo(dev)->viid;
3333 EXPORT_SYMBOL(cxgb4_port_viid);
3336 * cxgb4_port_idx - get the index of a port
3337 * @dev: the net device for the port
3339 * Return the index of the given port.
3341 unsigned int cxgb4_port_idx(const struct net_device *dev)
3343 return netdev2pinfo(dev)->port_id;
3345 EXPORT_SYMBOL(cxgb4_port_idx);
3347 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3348 struct tp_tcp_stats *v6)
3350 struct adapter *adap = pci_get_drvdata(pdev);
3352 spin_lock(&adap->stats_lock);
3353 t4_tp_get_tcp_stats(adap, v4, v6);
3354 spin_unlock(&adap->stats_lock);
3356 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3358 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3359 const unsigned int *pgsz_order)
3361 struct adapter *adap = netdev2adap(dev);
3363 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3364 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3365 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3366 HPZ3(pgsz_order[3]));
3368 EXPORT_SYMBOL(cxgb4_iscsi_init);
3370 int cxgb4_flush_eq_cache(struct net_device *dev)
3372 struct adapter *adap = netdev2adap(dev);
3375 ret = t4_fwaddrspace_write(adap, adap->mbox,
3376 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3379 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3381 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3383 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3387 ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3389 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3390 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3395 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3398 struct adapter *adap = netdev2adap(dev);
3399 u16 hw_pidx, hw_cidx;
3402 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3406 if (pidx != hw_pidx) {
3409 if (pidx >= hw_pidx)
3410 delta = pidx - hw_pidx;
3412 delta = size - hw_pidx + pidx;
3414 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3415 QID(qid) | PIDX(delta));
3420 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
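/*
 * Illustrative sketch (not from the original source): the doorbell delta
 * written above is the forward distance from the hardware producer index
 * to the software one on a ring of "size" descriptors.
 */
static inline u16 eq_pidx_delta(u16 sw_pidx, u16 hw_pidx, u16 size)
{
	return sw_pidx >= hw_pidx ? sw_pidx - hw_pidx
				  : size - hw_pidx + sw_pidx;	/* wrapped */
}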
3422 void cxgb4_disable_db_coalescing(struct net_device *dev)
3424 struct adapter *adap;
3426 adap = netdev2adap(dev);
3427 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3430 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3432 void cxgb4_enable_db_coalescing(struct net_device *dev)
3434 struct adapter *adap;
3436 adap = netdev2adap(dev);
3437 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3439 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3441 static struct pci_driver cxgb4_driver;
3443 static void check_neigh_update(struct neighbour *neigh)
3445 const struct device *parent;
3446 const struct net_device *netdev = neigh->dev;
3448 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3449 netdev = vlan_dev_real_dev(netdev);
3450 parent = netdev->dev.parent;
3451 if (parent && parent->driver == &cxgb4_driver.driver)
3452 t4_l2t_update(dev_get_drvdata(parent), neigh);
3455 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3459 case NETEVENT_NEIGH_UPDATE:
3460 check_neigh_update(data);
3462 case NETEVENT_REDIRECT:
3469 static bool netevent_registered;
3470 static struct notifier_block cxgb4_netevent_nb = {
3471 .notifier_call = netevent_cb
3474 static void drain_db_fifo(struct adapter *adap, int usecs)
3476 u32 v1, v2, lp_count, hp_count;
3479 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3480 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3481 if (is_t4(adap->chip)) {
3482 lp_count = G_LP_COUNT(v1);
3483 hp_count = G_HP_COUNT(v1);
3485 lp_count = G_LP_COUNT_T5(v1);
3486 hp_count = G_HP_COUNT_T5(v2);
3489 if (lp_count == 0 && hp_count == 0)
3491 set_current_state(TASK_UNINTERRUPTIBLE);
3492 schedule_timeout(usecs_to_jiffies(usecs));
3496 static void disable_txq_db(struct sge_txq *q)
3498 spin_lock_irq(&q->db_lock);
3500 spin_unlock_irq(&q->db_lock);
3503 static void enable_txq_db(struct sge_txq *q)
3505 spin_lock_irq(&q->db_lock);
3507 spin_unlock_irq(&q->db_lock);
3510 static void disable_dbs(struct adapter *adap)
3514 for_each_ethrxq(&adap->sge, i)
3515 disable_txq_db(&adap->sge.ethtxq[i].q);
3516 for_each_ofldrxq(&adap->sge, i)
3517 disable_txq_db(&adap->sge.ofldtxq[i].q);
3518 for_each_port(adap, i)
3519 disable_txq_db(&adap->sge.ctrlq[i].q);
3522 static void enable_dbs(struct adapter *adap)
3526 for_each_ethrxq(&adap->sge, i)
3527 enable_txq_db(&adap->sge.ethtxq[i].q);
3528 for_each_ofldrxq(&adap->sge, i)
3529 enable_txq_db(&adap->sge.ofldtxq[i].q);
3530 for_each_port(adap, i)
3531 enable_txq_db(&adap->sge.ctrlq[i].q);
3534 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3536 u16 hw_pidx, hw_cidx;
3539 spin_lock_bh(&q->db_lock);
3540 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3543 if (q->db_pidx != hw_pidx) {
3546 if (q->db_pidx >= hw_pidx)
3547 delta = q->db_pidx - hw_pidx;
3549 delta = q->size - hw_pidx + q->db_pidx;
3551 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3552 QID(q->cntxt_id) | PIDX(delta));
3556 spin_unlock_bh(&q->db_lock);
3558 CH_WARN(adap, "DB drop recovery failed.\n");
3560 static void recover_all_queues(struct adapter *adap)
3564 for_each_ethrxq(&adap->sge, i)
3565 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3566 for_each_ofldrxq(&adap->sge, i)
3567 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3568 for_each_port(adap, i)
3569 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3572 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3574 mutex_lock(&uld_mutex);
3575 if (adap->uld_handle[CXGB4_ULD_RDMA])
3576 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3578 mutex_unlock(&uld_mutex);
3581 static void process_db_full(struct work_struct *work)
3583 struct adapter *adap;
3585 adap = container_of(work, struct adapter, db_full_task);
3587 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3588 drain_db_fifo(adap, dbfifo_drain_delay);
3589 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3590 DBFIFO_HP_INT | DBFIFO_LP_INT,
3591 DBFIFO_HP_INT | DBFIFO_LP_INT);
3592 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3595 static void process_db_drop(struct work_struct *work)
3597 struct adapter *adap;
3599 adap = container_of(work, struct adapter, db_drop_task);
3601 if (is_t4(adap->chip)) {
3603 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3604 drain_db_fifo(adap, 1);
3605 recover_all_queues(adap);
3608 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3609 u16 qid = (dropped_db >> 15) & 0x1ffff;
3610 u16 pidx_inc = dropped_db & 0x1fff;
3612 unsigned short udb_density;
3613 unsigned long qpshift;
3617 dev_warn(adap->pdev_dev,
3618 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3620 (dropped_db >> 14) & 1,
3621 (dropped_db >> 13) & 1,
3624 drain_db_fifo(adap, 1);
3626 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3627 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3628 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3629 qpshift = PAGE_SHIFT - ilog2(udb_density);
3630 udb = qid << qpshift;
3632 page = udb / PAGE_SIZE;
3633 udb += (qid - (page * udb_density)) * 128;
3635 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
3637 /* Re-enable BAR2 WC */
3638 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3641 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
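/*
 * Illustrative sketch (not from the original source) of the T5 BAR2
 * doorbell offset replayed above: each page of the user-doorbell region
 * holds "udb_density" egress queues and each queue owns a 128-byte
 * doorbell slot within its page.
 */
static inline unsigned long t5_udb_offset(unsigned int qid,
					  unsigned short udb_density)
{
	unsigned long qpshift = PAGE_SHIFT - ilog2(udb_density);
	unsigned long udb = (unsigned long)qid << qpshift;
	unsigned long page = udb / PAGE_SIZE;

	udb &= PAGE_MASK;				/* align to its page */
	return udb + (qid - page * udb_density) * 128;	/* slot in page */
}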
3644 void t4_db_full(struct adapter *adap)
3646 if (is_t4(adap->chip)) {
3647 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3648 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3649 queue_work(workq, &adap->db_full_task);
3653 void t4_db_dropped(struct adapter *adap)
3655 if (is_t4(adap->chip))
3656 queue_work(workq, &adap->db_drop_task);
3659 static void uld_attach(struct adapter *adap, unsigned int uld)
3662 struct cxgb4_lld_info lli;
3665 lli.pdev = adap->pdev;
3666 lli.l2t = adap->l2t;
3667 lli.tids = &adap->tids;
3668 lli.ports = adap->port;
3669 lli.vr = &adap->vres;
3670 lli.mtus = adap->params.mtus;
3671 if (uld == CXGB4_ULD_RDMA) {
3672 lli.rxq_ids = adap->sge.rdma_rxq;
3673 lli.nrxq = adap->sge.rdmaqs;
3674 } else if (uld == CXGB4_ULD_ISCSI) {
3675 lli.rxq_ids = adap->sge.ofld_rxq;
3676 lli.nrxq = adap->sge.ofldqsets;
3678 lli.ntxq = adap->sge.ofldqsets;
3679 lli.nchan = adap->params.nports;
3680 lli.nports = adap->params.nports;
3681 lli.wr_cred = adap->params.ofldq_wr_cred;
3682 lli.adapter_type = adap->params.rev;
3683 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3684 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
3685 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3687 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3688 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3690 lli.filt_mode = adap->filter_mode;
3691 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3692 for (i = 0; i < NCHAN; i++)
3694 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3695 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3696 lli.fw_vers = adap->params.fw_vers;
3697 lli.dbfifo_int_thresh = dbfifo_int_thresh;
3698 lli.sge_pktshift = adap->sge.pktshift;
3699 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
3701 handle = ulds[uld].add(&lli);
3702 if (IS_ERR(handle)) {
3703 dev_warn(adap->pdev_dev,
3704 "could not attach to the %s driver, error %ld\n",
3705 uld_str[uld], PTR_ERR(handle));
3709 adap->uld_handle[uld] = handle;
3711 if (!netevent_registered) {
3712 register_netevent_notifier(&cxgb4_netevent_nb);
3713 netevent_registered = true;
3716 if (adap->flags & FULL_INIT_DONE)
3717 ulds[uld].state_change(handle, CXGB4_STATE_UP);
3720 static void attach_ulds(struct adapter *adap)
3724 mutex_lock(&uld_mutex);
3725 list_add_tail(&adap->list_node, &adapter_list);
3726 for (i = 0; i < CXGB4_ULD_MAX; i++)
3728 uld_attach(adap, i);
3729 mutex_unlock(&uld_mutex);
3732 static void detach_ulds(struct adapter *adap)
3736 mutex_lock(&uld_mutex);
3737 list_del(&adap->list_node);
3738 for (i = 0; i < CXGB4_ULD_MAX; i++)
3739 if (adap->uld_handle[i]) {
3740 ulds[i].state_change(adap->uld_handle[i],
3741 CXGB4_STATE_DETACH);
3742 adap->uld_handle[i] = NULL;
3744 if (netevent_registered && list_empty(&adapter_list)) {
3745 unregister_netevent_notifier(&cxgb4_netevent_nb);
3746 netevent_registered = false;
3748 mutex_unlock(&uld_mutex);
3751 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
3755 mutex_lock(&uld_mutex);
3756 for (i = 0; i < CXGB4_ULD_MAX; i++)
3757 if (adap->uld_handle[i])
3758 ulds[i].state_change(adap->uld_handle[i], new_state);
3759 mutex_unlock(&uld_mutex);
3763 * cxgb4_register_uld - register an upper-layer driver
3764 * @type: the ULD type
3765 * @p: the ULD methods
3767 * Registers an upper-layer driver with this driver and notifies the ULD
3768 * about any presently available devices that support its type. Returns
3769 * %-EBUSY if a ULD of the same type is already registered.
3771 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
3774 struct adapter *adap;
3776 if (type >= CXGB4_ULD_MAX)
3778 mutex_lock(&uld_mutex);
3779 if (ulds[type].add) {
3784 list_for_each_entry(adap, &adapter_list, list_node)
3785 uld_attach(adap, type);
3786 out: mutex_unlock(&uld_mutex);
3789 EXPORT_SYMBOL(cxgb4_register_uld);
3792 * cxgb4_unregister_uld - unregister an upper-layer driver
3793 * @type: the ULD type
3795 * Unregisters an existing upper-layer driver.
3797 int cxgb4_unregister_uld(enum cxgb4_uld type)
3799 struct adapter *adap;
3801 if (type >= CXGB4_ULD_MAX)
3803 mutex_lock(&uld_mutex);
3804 list_for_each_entry(adap, &adapter_list, list_node)
3805 adap->uld_handle[type] = NULL;
3806 ulds[type].add = NULL;
3807 mutex_unlock(&uld_mutex);
3810 EXPORT_SYMBOL(cxgb4_unregister_uld);
3813 * cxgb_up - enable the adapter
3814 * @adap: adapter being enabled
3816 * Called when the first port is enabled, this function performs the
3817 * actions necessary to make an adapter operational, such as completing
3818 * the initialization of HW modules, and enabling interrupts.
3820 * Must be called with the rtnl lock held.
3822 static int cxgb_up(struct adapter *adap)
3826 err = setup_sge_queues(adap);
3829 err = setup_rss(adap);
3833 if (adap->flags & USING_MSIX) {
3834 name_msix_vecs(adap);
3835 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
3836 adap->msix_info[0].desc, adap);
3840 err = request_msix_queue_irqs(adap);
3842 free_irq(adap->msix_info[0].vec, adap);
3846 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
3847 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
3848 adap->port[0]->name, adap);
3854 t4_intr_enable(adap);
3855 adap->flags |= FULL_INIT_DONE;
3856 notify_ulds(adap, CXGB4_STATE_UP);
3860 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
3862 t4_free_sge_resources(adap);
3866 static void cxgb_down(struct adapter *adapter)
3868 t4_intr_disable(adapter);
3869 cancel_work_sync(&adapter->tid_release_task);
3870 cancel_work_sync(&adapter->db_full_task);
3871 cancel_work_sync(&adapter->db_drop_task);
3872 adapter->tid_release_task_busy = false;
3873 adapter->tid_release_head = NULL;
3875 if (adapter->flags & USING_MSIX) {
3876 free_msix_queue_irqs(adapter);
3877 free_irq(adapter->msix_info[0].vec, adapter);
3879 free_irq(adapter->pdev->irq, adapter);
3880 quiesce_rx(adapter);
3881 t4_sge_stop(adapter);
3882 t4_free_sge_resources(adapter);
3883 adapter->flags &= ~FULL_INIT_DONE;
3887 * net_device operations
3889 static int cxgb_open(struct net_device *dev)
3892 struct port_info *pi = netdev_priv(dev);
3893 struct adapter *adapter = pi->adapter;
3895 netif_carrier_off(dev);
3897 if (!(adapter->flags & FULL_INIT_DONE)) {
3898 err = cxgb_up(adapter);
3903 err = link_start(dev);
3905 netif_tx_start_all_queues(dev);
3909 static int cxgb_close(struct net_device *dev)
3911 struct port_info *pi = netdev_priv(dev);
3912 struct adapter *adapter = pi->adapter;
3914 netif_tx_stop_all_queues(dev);
3915 netif_carrier_off(dev);
3916 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
3919 /* Return an error number if the indicated filter isn't writable ...
3921 static int writable_filter(struct filter_entry *f)
3931 /* Delete the filter at the specified index (if valid). This checks for all
3932 * the common problems with doing this, like the filter being locked,
3933 * currently pending in another operation, etc.
3935 static int delete_filter(struct adapter *adapter, unsigned int fidx)
3937 struct filter_entry *f;
3940 if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
3943 f = &adapter->tids.ftid_tab[fidx];
3944 ret = writable_filter(f);
3948 return del_filter_wr(adapter, fidx);
3953 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
3954 __be32 sip, __be16 sport, __be16 vlan,
3955 unsigned int queue, unsigned char port, unsigned char mask)
3958 struct filter_entry *f;
3959 struct adapter *adap;
3963 adap = netdev2adap(dev);
3965 /* Adjust stid to correct filter index */
3966 stid -= adap->tids.nstids;
3967 stid += adap->tids.nftids;
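	/*
	 * ftid_tab holds the nftids regular filters first, then the
	 * nsftids server filters (see tid_init()), so a server-filter
	 * stid maps to filter index (stid - nstids) + nftids.
	 */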
3969 /* Check to make sure the filter requested is writable ...
3971 f = &adap->tids.ftid_tab[stid];
3972 ret = writable_filter(f);
3976 /* Clear out any old resources being used by the filter before
3977 * we start constructing the new filter.
3980 clear_filter(adap, f);
3982 /* Clear out filter specifications */
3983 memset(&f->fs, 0, sizeof(struct ch_filter_specification));
3984 f->fs.val.lport = cpu_to_be16(sport);
3985 f->fs.mask.lport = ~0;
3987 if ((val[0] | val[1] | val[2] | val[3]) != 0) {
3988 for (i = 0; i < 4; i++) {
3989 f->fs.val.lip[i] = val[i];
3990 f->fs.mask.lip[i] = ~0;
3992 if (adap->filter_mode & F_PORT) {
3993 f->fs.val.iport = port;
3994 f->fs.mask.iport = mask;
4000 /* Mark filter as locked */
4004 ret = set_filter_wr(adap, stid);
4006 clear_filter(adap, f);
4012 EXPORT_SYMBOL(cxgb4_create_server_filter);
4014 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4015 unsigned int queue, bool ipv6)
4018 struct filter_entry *f;
4019 struct adapter *adap;
4021 adap = netdev2adap(dev);
4023 /* Adjust stid to correct filter index */
4024 stid -= adap->tids.nstids;
4025 stid += adap->tids.nftids;
4027 f = &adap->tids.ftid_tab[stid];
4028 /* Unlock the filter */
4031 ret = delete_filter(adap, stid);
4037 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4039 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4040 struct rtnl_link_stats64 *ns)
4042 struct port_stats stats;
4043 struct port_info *p = netdev_priv(dev);
4044 struct adapter *adapter = p->adapter;
4046 spin_lock(&adapter->stats_lock);
4047 t4_get_port_stats(adapter, p->tx_chan, &stats);
4048 spin_unlock(&adapter->stats_lock);
4050 ns->tx_bytes = stats.tx_octets;
4051 ns->tx_packets = stats.tx_frames;
4052 ns->rx_bytes = stats.rx_octets;
4053 ns->rx_packets = stats.rx_frames;
4054 ns->multicast = stats.rx_mcast_frames;
4056 /* detailed rx_errors */
4057 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4059 ns->rx_over_errors = 0;
4060 ns->rx_crc_errors = stats.rx_fcs_err;
4061 ns->rx_frame_errors = stats.rx_symbol_err;
4062 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
4063 stats.rx_ovflow2 + stats.rx_ovflow3 +
4064 stats.rx_trunc0 + stats.rx_trunc1 +
4065 stats.rx_trunc2 + stats.rx_trunc3;
4066 ns->rx_missed_errors = 0;
4068 /* detailed tx_errors */
4069 ns->tx_aborted_errors = 0;
4070 ns->tx_carrier_errors = 0;
4071 ns->tx_fifo_errors = 0;
4072 ns->tx_heartbeat_errors = 0;
4073 ns->tx_window_errors = 0;
4075 ns->tx_errors = stats.tx_error_frames;
4076 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4077 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4081 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4084 int ret = 0, prtad, devad;
4085 struct port_info *pi = netdev_priv(dev);
4086 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4090 if (pi->mdio_addr < 0)
4092 data->phy_id = pi->mdio_addr;
4096 if (mdio_phy_id_is_c45(data->phy_id)) {
4097 prtad = mdio_phy_id_prtad(data->phy_id);
4098 devad = mdio_phy_id_devad(data->phy_id);
4099 } else if (data->phy_id < 32) {
4100 prtad = data->phy_id;
4102 data->reg_num &= 0x1f;
4106 mbox = pi->adapter->fn;
4107 if (cmd == SIOCGMIIREG)
4108 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4109 data->reg_num, &data->val_out);
4111 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4112 data->reg_num, data->val_in);
4120 static void cxgb_set_rxmode(struct net_device *dev)
4122 /* unfortunately we can't return errors to the stack */
4123 set_rxmode(dev, -1, false);
4126 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4129 struct port_info *pi = netdev_priv(dev);
4131 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
4133 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4140 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4143 struct sockaddr *addr = p;
4144 struct port_info *pi = netdev_priv(dev);
4146 if (!is_valid_ether_addr(addr->sa_data))
4147 return -EADDRNOTAVAIL;
4149 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4150 pi->xact_addr_filt, addr->sa_data, true, true);
4154 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4155 pi->xact_addr_filt = ret;
4159 #ifdef CONFIG_NET_POLL_CONTROLLER
4160 static void cxgb_netpoll(struct net_device *dev)
4162 struct port_info *pi = netdev_priv(dev);
4163 struct adapter *adap = pi->adapter;
4165 if (adap->flags & USING_MSIX) {
4167 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4169 for (i = pi->nqsets; i; i--, rx++)
4170 t4_sge_intr_msix(0, &rx->rspq);
4172 t4_intr_handler(adap)(0, adap);
4176 static const struct net_device_ops cxgb4_netdev_ops = {
4177 .ndo_open = cxgb_open,
4178 .ndo_stop = cxgb_close,
4179 .ndo_start_xmit = t4_eth_xmit,
4180 .ndo_get_stats64 = cxgb_get_stats,
4181 .ndo_set_rx_mode = cxgb_set_rxmode,
4182 .ndo_set_mac_address = cxgb_set_mac_addr,
4183 .ndo_set_features = cxgb_set_features,
4184 .ndo_validate_addr = eth_validate_addr,
4185 .ndo_do_ioctl = cxgb_ioctl,
4186 .ndo_change_mtu = cxgb_change_mtu,
4187 #ifdef CONFIG_NET_POLL_CONTROLLER
4188 .ndo_poll_controller = cxgb_netpoll,
4192 void t4_fatal_err(struct adapter *adap)
4194 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4195 t4_intr_disable(adap);
4196 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4199 static void setup_memwin(struct adapter *adap)
4201 u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
4203 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
4204 if (is_t4(adap->chip)) {
4205 mem_win0_base = bar0 + MEMWIN0_BASE;
4206 mem_win1_base = bar0 + MEMWIN1_BASE;
4207 mem_win2_base = bar0 + MEMWIN2_BASE;
4209 /* For T5, only relative offset inside the PCIe BAR is passed */
4210 mem_win0_base = MEMWIN0_BASE;
4211 mem_win1_base = MEMWIN1_BASE_T5;
4212 mem_win2_base = MEMWIN2_BASE_T5;
4214 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4215 mem_win0_base | BIR(0) |
4216 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4217 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4218 mem_win1_base | BIR(0) |
4219 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4220 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4221 mem_win2_base | BIR(0) |
4222 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
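/*
 * Illustrative note (not from the original source): the WINDOW field in
 * the writes above encodes an aperture as log2 of its size in KB, hence
 * "ilog2(aperture) - 10" for apertures expressed in bytes.
 */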
4225 static void setup_memwin_rdma(struct adapter *adap)
4227 if (adap->vres.ocq.size) {
4228 unsigned int start, sz_kb;
4230 start = pci_resource_start(adap->pdev, 2) +
4231 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4232 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4234 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4235 start | BIR(1) | WINDOW(ilog2(sz_kb)));
4237 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4238 adap->vres.ocq.start);
4240 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4244 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4249 /* get device capabilities */
4250 memset(c, 0, sizeof(*c));
4251 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4252 FW_CMD_REQUEST | FW_CMD_READ);
4253 c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4254 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4258 /* select capabilities we'll be using */
4259 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4261 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4263 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4264 } else if (vf_acls) {
4265 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
4268 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4269 FW_CMD_REQUEST | FW_CMD_WRITE);
4270 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4274 ret = t4_config_glbl_rss(adap, adap->fn,
4275 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4276 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4277 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4281 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4282 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
4288 /* tweak some settings */
4289 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4290 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4291 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4292 v = t4_read_reg(adap, TP_PIO_DATA);
4293 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
4295 /* first 4 Tx modulation queues point to consecutive Tx channels */
4296 adap->params.tp.tx_modq_map = 0xE4;
4297 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4298 V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
4300 /* associate each Tx modulation queue with consecutive Tx channels */
4302 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4303 &v, 1, A_TP_TX_SCHED_HDR);
4304 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4305 &v, 1, A_TP_TX_SCHED_FIFO);
4306 t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4307 &v, 1, A_TP_TX_SCHED_PCMD);
4309 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4310 if (is_offload(adap)) {
4311 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4312 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4313 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4314 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4315 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4316 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4317 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4318 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4319 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4320 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4323 /* get basic stuff going */
4324 return t4_early_init(adap, adap->fn);
4328 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4330 #define MAX_ATIDS 8192U
4333 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4335 * If the firmware we're dealing with has Configuration File support, then
4336 * we use that to perform all configuration.
4340 * Tweak configuration based on module parameters, etc. Most of these have
4341 * defaults assigned to them by Firmware Configuration Files (if we're using
4342 * them) but need to be explicitly set if we're using hard-coded
4343 * initialization. But even in the case of using Firmware Configuration
4344 * Files, we'd like to expose the ability to change these via module
4345 * parameters so these are essentially common tweaks/settings for
4346 * Configuration Files and hard-coded initialization ...
4348 static int adap_init0_tweaks(struct adapter *adapter)
4351 * Fix up various Host-Dependent Parameters like Page Size, Cache
4352 * Line Size, etc. The firmware default is for a 4KB Page Size and
4353 * 64B Cache Line Size ...
4355 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4358 * Process module parameters which affect early initialization.
4360 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4361 dev_err(&adapter->pdev->dev,
4362 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4366 t4_set_reg_field(adapter, SGE_CONTROL,
4368 PKTSHIFT(rx_dma_offset));
4371 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4372 * adds the pseudo header itself.
4374 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4375 CSUM_HAS_PSEUDO_HDR, 0);
4381 * Attempt to initialize the adapter via a Firmware Configuration File.
4383 static int adap_init0_config(struct adapter *adapter, int reset)
4385 struct fw_caps_config_cmd caps_cmd;
4386 const struct firmware *cf;
4387 unsigned long mtype = 0, maddr = 0;
4388 u32 finiver, finicsum, cfcsum;
4389 int ret, using_flash;
4390 char *fw_config_file, fw_config_file_path[256];
4393 * Reset device if necessary.
4396 ret = t4_fw_reset(adapter, adapter->mbox,
4397 PIORSTMODE | PIORST);
4403 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4404 * then use that. Otherwise, use the configuration file stored
4405 * in the adapter flash ...
4407 switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
4409 fw_config_file = FW_CFNAME;
4412 fw_config_file = FW5_CFNAME;
4415 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4416 adapter->pdev->device);
4421 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4424 mtype = FW_MEMTYPE_CF_FLASH;
4425 maddr = t4_flash_cfg_addr(adapter);
4427 u32 params[7], val[7];
4430 if (cf->size >= FLASH_CFG_MAX_SIZE)
4433 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4434 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4435 ret = t4_query_params(adapter, adapter->mbox,
4436 adapter->fn, 0, 1, params, val);
4439 * For t4_memory_write() below addresses and
4440 * sizes have to be in terms of multiples of 4
4441 * bytes. So, if the Configuration File isn't
4442 * a multiple of 4 bytes in length, we'll have
4443 * to write that out separately since we can't
4444 * guarantee that the bytes following the
4445 * residual byte in the buffer returned by
4446 * request_firmware() are zeroed out ...
4448 size_t resid = cf->size & 0x3;
4449 size_t size = cf->size & ~0x3;
4450 __be32 *data = (__be32 *)cf->data;
4452 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
4453 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
4455 ret = t4_memory_write(adapter, mtype, maddr,
4457 if (ret == 0 && resid != 0) {
4464 last.word = data[size >> 2];
4465 for (i = resid; i < 4; i++)
4467 ret = t4_memory_write(adapter, mtype,
4474 release_firmware(cf);
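	/*
	 * Illustrative sketch (not from the original source): the "last"
	 * union logic above is equivalent to building a zero-padded
	 * final word from the 1-3 leftover bytes:
	 *
	 *	union { __be32 word; u8 buf[4]; } last = { .word = 0 };
	 *	memcpy(last.buf, (const u8 *)cf->data + size, resid);
	 *
	 * and then writing last.word at maddr + size.
	 */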
4480 * Issue a Capability Configuration command to the firmware to get it
4481 * to parse the Configuration File. We don't use t4_fw_config_file()
4482 * because we want the ability to modify various features after we've
4483 * processed the configuration file ...
4485 memset(&caps_cmd, 0, sizeof(caps_cmd));
4486 caps_cmd.op_to_write =
4487 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4490 caps_cmd.cfvalid_to_len16 =
4491 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
4492 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4493 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4494 FW_LEN16(caps_cmd));
4495 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4500 finiver = ntohl(caps_cmd.finiver);
4501 finicsum = ntohl(caps_cmd.finicsum);
4502 cfcsum = ntohl(caps_cmd.cfcsum);
4503 if (finicsum != cfcsum)
4504 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4505 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4509 * And now tell the firmware to use the configuration we just loaded.
4511 caps_cmd.op_to_write =
4512 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4515 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4516 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4522 * Tweak configuration based on system architecture, module
4525 ret = adap_init0_tweaks(adapter);
4530 * And finally tell the firmware to initialize itself using the
4531 * parameters from the Configuration File.
4533 ret = t4_fw_initialize(adapter, adapter->mbox);
4537 sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
4539 * Return successfully and note that we're operating with parameters
4540 * from the Configuration File rather than hard-wired
4541 * initialization constants buried in the driver.
4543 adapter->flags |= USING_SOFT_PARAMS;
4544 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4545 "Configuration File %s, version %#x, computed checksum %#x\n",
4548 : fw_config_file_path),
4553 * Something bad happened. Return the error ... (If the "error"
4554 * is that there's no Configuration File on the adapter we don't
4555 * want to issue a warning since this is fairly common.)
4559 dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
4565 * Attempt to initialize the adapter via hard-coded, driver supplied
4568 static int adap_init0_no_config(struct adapter *adapter, int reset)
4570 struct sge *s = &adapter->sge;
4571 struct fw_caps_config_cmd caps_cmd;
4576 * Reset device if necessary
4579 ret = t4_fw_reset(adapter, adapter->mbox,
4580 PIORSTMODE | PIORST);
4586 * Get device capabilities and select which we'll be using.
4588 memset(&caps_cmd, 0, sizeof(caps_cmd));
4589 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4590 FW_CMD_REQUEST | FW_CMD_READ);
4591 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4592 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4597 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4599 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4601 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4602 } else if (vf_acls) {
4603 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
4606 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4607 FW_CMD_REQUEST | FW_CMD_WRITE);
4608 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4614 * Tweak configuration based on system architecture, module
4617 ret = adap_init0_tweaks(adapter);
4622 * Select RSS Global Mode we want to use. We use "Basic Virtual"
4623 * mode which maps each Virtual Interface to its own section of
4624 * the RSS Table and we turn on all map and hash enables ...
4626 adapter->flags |= RSS_TNLALLLOOKUP;
4627 ret = t4_config_glbl_rss(adapter, adapter->mbox,
4628 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4629 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4630 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
4631 ((adapter->flags & RSS_TNLALLLOOKUP) ?
4632 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
4637 * Set up our own fundamental resource provisioning ...
4639 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
4640 PFRES_NEQ, PFRES_NETHCTRL,
4641 PFRES_NIQFLINT, PFRES_NIQ,
4642 PFRES_TC, PFRES_NVI,
4643 FW_PFVF_CMD_CMASK_MASK,
4644 pfvfres_pmask(adapter, adapter->fn, 0),
4646 PFRES_R_CAPS, PFRES_WX_CAPS);
4651 * Perform low level SGE initialization. We need to do this before we
4652 * send the firmware the INITIALIZE command because that will cause
4653 * any other PF Drivers which are waiting for the Master
4654 * Initialization to proceed forward.
4656 for (i = 0; i < SGE_NTIMERS - 1; i++)
4657 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
4658 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
4659 s->counter_val[0] = 1;
4660 for (i = 1; i < SGE_NCOUNTERS; i++)
4661 s->counter_val[i] = min(intr_cnt[i - 1],
4662 THRESHOLD_0_GET(THRESHOLD_0_MASK));
4663 t4_sge_init(adapter);
4665 #ifdef CONFIG_PCI_IOV
4667 * Provision resource limits for Virtual Functions. We currently
4668 * grant them all the same static resource limits except for the Port
4669 * Access Rights Mask which we're assigning based on the PF. All of
4670 * the static provisioning stuff for both the PF and VF really needs
4671 * to be managed in a persistent manner for each device which the
4672 * firmware controls.
4677 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
4678 if (num_vf[pf] <= 0)
4681 /* VF numbering starts at 1! */
4682 for (vf = 1; vf <= num_vf[pf]; vf++) {
4683 ret = t4_cfg_pfvf(adapter, adapter->mbox,
4685 VFRES_NEQ, VFRES_NETHCTRL,
4686 VFRES_NIQFLINT, VFRES_NIQ,
4687 VFRES_TC, VFRES_NVI,
4688 FW_PFVF_CMD_CMASK_MASK,
4692 VFRES_R_CAPS, VFRES_WX_CAPS);
4694 dev_warn(adapter->pdev_dev,
4696 "provision pf/vf=%d/%d; "
4697 "err=%d\n", pf, vf, ret);
4704 * Set up the default filter mode. Later we'll want to implement this
4705 * via a firmware command, etc. ... This needs to be done before the
4706 * firmware initialization command ... If the selected set of fields
4707 * isn't equal to the default value, we'll need to make sure that the
4708 * field selections will fit in the 36-bit budget.
4710 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
4713 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
4714 switch (tp_vlan_pri_map & (1 << j)) {
4716 /* compressed filter field not enabled */
4736 case ETHERTYPE_MASK:
4742 case MPSHITTYPE_MASK:
4745 case FRAGMENTATION_MASK:
4751 dev_err(adapter->pdev_dev,
4752 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
4753 " using %#x\n", tp_vlan_pri_map, bits,
4754 TP_VLAN_PRI_MAP_DEFAULT);
4755 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
4758 v = tp_vlan_pri_map;
4759 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
4760 &v, 1, TP_VLAN_PRI_MAP);
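	/*
	 * Illustrative sketch (not from the original source): the budget
	 * check above amounts to summing per-field widths over the set
	 * bits of a candidate map (the field_width[] table here is
	 * hypothetical):
	 *
	 *	for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
	 *		if (map & (1 << j))
	 *			bits += field_width[j];
	 *
	 * with the result required to fit in the 36-bit budget.
	 */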
4763 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
4764 * to support any of the compressed filter fields above. Newer
4765 * versions of the firmware do this automatically but it doesn't hurt
4766 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
4767 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
4768 * since the firmware automatically turns this on and off when we have
4769 * a non-zero number of filters active (since it does have a
4770 * performance impact).
4772 if (tp_vlan_pri_map)
4773 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
4774 FIVETUPLELOOKUP_MASK,
4775 FIVETUPLELOOKUP_MASK);
4778 * Tweak some settings.
4780 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
4781 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
4782 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
4783 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
4786 * Get basic stuff going by issuing the Firmware Initialize command.
4787 * Note that this _must_ be after all PFVF commands ...
4789 ret = t4_fw_initialize(adapter, adapter->mbox);
4794 * Return successfully!
4796 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
4797 "driver parameters\n");
4801 * Something bad happened. Return the error ...
4808 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4810 static int adap_init0(struct adapter *adap)
4814 enum dev_state state;
4815 u32 params[7], val[7];
4816 struct fw_caps_config_cmd caps_cmd;
4820 * Contact FW, advertising Master capability (and potentially forcing
4821 * ourselves as the Master PF if our module parameter force_init is
4824 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
4825 force_init ? MASTER_MUST : MASTER_MAY,
4828 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4832 if (ret == adap->mbox)
4833 adap->flags |= MASTER_PF;
4834 if (force_init && state == DEV_STATE_INIT)
4835 state = DEV_STATE_UNINIT;
4838 * If we're the Master PF Driver and the device is uninitialized,
4839 * then let's consider upgrading the firmware ... (We always want
4840 * to check the firmware version number in order to A. get it for
4841 * later reporting and B. warn if the currently loaded firmware
4842 * is excessively mismatched relative to the driver.)
4844 ret = t4_check_fw_version(adap);
4845 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
4846 if (ret == -EINVAL || ret > 0) {
4847 if (upgrade_fw(adap) >= 0) {
4849 * Note that the chip was reset as part of the
4850 * firmware upgrade so we don't reset it again
4851 * below and grab the new firmware version.
4854 ret = t4_check_fw_version(adap);
4862 * Grab VPD parameters. This should be done after we establish a
4863 * connection to the firmware since some of the VPD parameters
4864 * (notably the Core Clock frequency) are retrieved via requests to
4865 * the firmware. On the other hand, we need these fairly early on
4866 * so we do this right after getting hold of the firmware.
4868 ret = get_vpd_params(adap, &adap->params.vpd);
4873 * Find out what ports are available to us. Note that we need to do
4874 * this before calling adap_init0_no_config() since it needs nports
4878 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4879 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
4880 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
4884 adap->params.nports = hweight32(port_vec);
4885 adap->params.portvec = port_vec;
4888 * If the firmware is initialized already (and we're not forcing a
4889 * master initialization), note that we're living with existing
4890 * adapter parameters. Otherwise, it's time to try initializing the
4893 if (state == DEV_STATE_INIT) {
4894 dev_info(adap->pdev_dev, "Coming up as %s: "\
4895 "Adapter already initialized\n",
4896 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
4897 adap->flags |= USING_SOFT_PARAMS;
4899 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4900 "Initializing adapter\n");
4903 * If the firmware doesn't support Configuration
4904 * Files, warn the user and exit.
4907 dev_warn(adap->pdev_dev, "Firmware doesn't support "
4908 "configuration file.\n");
4910 ret = adap_init0_no_config(adap, reset);
4913 * Find out whether we're dealing with a version of
4914 * the firmware which has configuration file support.
4916 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4917 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4918 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
4922 * If the firmware doesn't support Configuration
4923 * Files, use the old Driver-based, hard-wired
4924 * initialization. Otherwise, try using the
4925 * Configuration File support and fall back to the
4926 * Driver-based initialization if there's no
4927 * Configuration File found.
4930 ret = adap_init0_no_config(adap, reset);
4933 * The firmware provides us with a memory
4934 * buffer where we can load a Configuration
4935 * File from the host if we want to override
4936 * the Configuration File in flash.
4939 ret = adap_init0_config(adap, reset);
4940 if (ret == -ENOENT) {
4941 dev_info(adap->pdev_dev,
4942 "No Configuration File present "
4943 "on adapter. Using hard-wired "
4944 "configuration parameters.\n");
4945 ret = adap_init0_no_config(adap, reset);
4950 dev_err(adap->pdev_dev,
4951 "could not initialize adapter, error %d\n",
4958 * If we're living with non-hard-coded parameters (either from a
4959 * Firmware Configuration File or values programmed by a different PF
4960 * Driver), give the SGE code a chance to pull in anything that it
4961 * needs ... Note that this must be called after we retrieve our VPD
4962 * parameters in order to know how to convert core ticks to seconds.
4964 if (adap->flags & USING_SOFT_PARAMS) {
4965 ret = t4_sge_init(adap);
4970 if (is_bypass_device(adap->pdev->device))
4971 adap->params.bypass = 1;
4974 * Grab some of our basic fundamental operating parameters.
4976 #define FW_PARAM_DEV(param) \
4977 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
4978 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
4980 #define FW_PARAM_PFVF(param) \
4981 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
4982 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
4983 FW_PARAMS_PARAM_Y(0) | \
4984 FW_PARAMS_PARAM_Z(0)
4986 params[0] = FW_PARAM_PFVF(EQ_START);
4987 params[1] = FW_PARAM_PFVF(L2T_START);
4988 params[2] = FW_PARAM_PFVF(L2T_END);
4989 params[3] = FW_PARAM_PFVF(FILTER_START);
4990 params[4] = FW_PARAM_PFVF(FILTER_END);
4991 params[5] = FW_PARAM_PFVF(IQFLINT_START);
4992 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
4995 adap->sge.egr_start = val[0];
4996 adap->l2t_start = val[1];
4997 adap->l2t_end = val[2];
4998 adap->tids.ftid_base = val[3];
4999 adap->tids.nftids = val[4] - val[3] + 1;
5000 adap->sge.ingr_start = val[5];
5002 /* query params related to active filter region */
5003 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5004 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5005 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5006 /* If an active filter region is configured, we enable establishing
5007 * offload connections through firmware work requests
5009 if ((val[0] != val[1]) && (ret >= 0)) {
5010 adap->flags |= FW_OFLD_CONN;
5011 adap->tids.aftid_base = val[0];
5012 adap->tids.aftid_end = val[1];
5015 /* If we're running on newer firmware, let it know that we're
5016 * prepared to deal with encapsulated CPL messages. Older
5017 * firmware won't understand this and we'll just get
5018 * unencapsulated messages ...
5020 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5022 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5025 * Get device capabilities so we can determine what resources we need
5028 memset(&caps_cmd, 0, sizeof(caps_cmd));
5029 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5030 FW_CMD_REQUEST | FW_CMD_READ);
5031 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5032 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5037 if (caps_cmd.ofldcaps) {
5038 /* query offload-related parameters */
5039 params[0] = FW_PARAM_DEV(NTID);
5040 params[1] = FW_PARAM_PFVF(SERVER_START);
5041 params[2] = FW_PARAM_PFVF(SERVER_END);
5042 params[3] = FW_PARAM_PFVF(TDDP_START);
5043 params[4] = FW_PARAM_PFVF(TDDP_END);
5044 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5045 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5049 adap->tids.ntids = val[0];
5050 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5051 adap->tids.stid_base = val[1];
5052 adap->tids.nstids = val[2] - val[1] + 1;
5054 * Set up the server filter region. Divide the available filter
5055 * region into two parts: regular filters get 1/3rd and server
5056 * filters get the 2/3rd part. This is only enabled if the workaround
5058 * 1. For regular filters.
5059 * 2. Server filters: these are special filters which are used
5060 * to redirect SYN packets to the offload queue.
5062 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5063 adap->tids.sftid_base = adap->tids.ftid_base +
5064 DIV_ROUND_UP(adap->tids.nftids, 3);
5065 adap->tids.nsftids = adap->tids.nftids -
5066 DIV_ROUND_UP(adap->tids.nftids, 3);
5067 adap->tids.nftids = adap->tids.sftid_base -
5068 adap->tids.ftid_base;
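		/*
		 * Worked example (illustrative): with nftids = 496,
		 * DIV_ROUND_UP(496, 3) = 166, so regular filters keep the
		 * first 166 entries and the remaining 330 become server
		 * filters.
		 */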
5070 adap->vres.ddp.start = val[3];
5071 adap->vres.ddp.size = val[4] - val[3] + 1;
5072 adap->params.ofldq_wr_cred = val[5];
5074 adap->params.offload = 1;
5076 if (caps_cmd.rdmacaps) {
5077 params[0] = FW_PARAM_PFVF(STAG_START);
5078 params[1] = FW_PARAM_PFVF(STAG_END);
5079 params[2] = FW_PARAM_PFVF(RQ_START);
5080 params[3] = FW_PARAM_PFVF(RQ_END);
5081 params[4] = FW_PARAM_PFVF(PBL_START);
5082 params[5] = FW_PARAM_PFVF(PBL_END);
5083 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5087 adap->vres.stag.start = val[0];
5088 adap->vres.stag.size = val[1] - val[0] + 1;
5089 adap->vres.rq.start = val[2];
5090 adap->vres.rq.size = val[3] - val[2] + 1;
5091 adap->vres.pbl.start = val[4];
5092 adap->vres.pbl.size = val[5] - val[4] + 1;
		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
	/*
	 * These are finalized by FW initialization, load their values now.
	 */
	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (j = 0; j < NCHAN; j++)
		adap->params.tp.tx_modq[j] = j;

	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			 &adap->filter_mode, 1,
			 TP_VLAN_PRI_MAP);
	adap->flags |= FW_OK;
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with
	 * EIO, the FW is either not operating within its spec or something
	 * catastrophic happened to HW/FW; stop issuing commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
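
/* PCI EEH (Enhanced Error Handling) callbacks.  On a channel error the PCI
 * core first invokes .error_detected so the driver can quiesce the device;
 * if the platform then resets the slot it invokes .slot_reset to bring the
 * HW/FW back up, and finally .resume to restart the interfaces.
 */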
static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	pci_disable_device(pdev);
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};
static inline bool is_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}
static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
			     unsigned int size, unsigned int iqe_size)
{
	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
	q->iqe_len = iqe_size;
	q->size = size;
}
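
/* init_rspq() only records the holdoff settings; they take effect when the
 * response queue is actually created later by the SGE code.  Passing a
 * pkt_cnt_idx >= SGE_NCOUNTERS leaves packet-count-based interrupt holdoff
 * disabled, so the queue relies on the holdoff timer alone.
 */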
/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, q10g = 0, n10g = 0, qidx = 0;

	for_each_port(adap, i)
		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */
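	/* Worked example with hypothetical numbers: 2 x 10G ports plus
	 * 2 x 1G ports and MAX_ETH_QSETS = 32 gives q10g = (32 - 2) / 2 = 15,
	 * which netif_get_num_default_rss_queues() then typically caps at 8,
	 * so qidx ends up at 2 * 8 + 2 * 1 = 18 Ethernet queue sets.
	 */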
	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up
		 * to 1G, otherwise we divide all available queues amongst
		 * the channels capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
	}
	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(&r->rspq, 0, 0, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
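
/* Note the first pass above trims one queue set at a time, round-robin
 * across the ports, so the reduction is spread as evenly as possible; the
 * second pass then reassigns contiguous first_qset offsets.
 */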
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, err, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;

	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
		want = err;

	if (!err) {
		/*
		 * Distribute available vectors to the various queue groups.
		 * Every group gets its minimum requirement and NIC gets top
		 * priority for leftovers.
		 */
		i = want - EXTRA_VECS - ofld_need;
		if (i < s->max_ethqsets) {
			s->max_ethqsets = i;
			if (i < s->ethqsets)
				reduce_ethqs(adap, i);
		}
		if (is_offload(adap)) {
			i = want - EXTRA_VECS - s->max_ethqsets;
			i -= ofld_need - nchan;
			s->ofldqsets = (i / nchan) * nchan;  /* round down */
		}
		for (i = 0; i < want; ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(adap->pdev_dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}
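
/* With this era's API, pci_enable_msix() returns 0 on success, a negative
 * errno on failure, or, when it cannot satisfy the full request, the number
 * of vectors that could have been allocated.  The while loop above therefore
 * retries with that smaller count until the request succeeds or falls below
 * the hard minimum in 'need'.
 */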
#undef EXTRA_VECS

static int init_rss(struct adapter *adap)
{
	unsigned int i, j;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
	}
	return 0;
}
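
/* ethtool_rxfh_indir_default(j, n) is simply j % n, so each port's RSS
 * indirection table spreads flows round-robin across its nqsets Rx queues.
 */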
static void print_port_info(const struct net_device *dev)
{
	static const char *base[] = {
		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
	};

	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (bufp != buf)
		--bufp;		/* drop the trailing '/' */
	sprintf(bufp, "BASE-%s", base[pi->port_type]);

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, E/C: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.ec);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
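
/* This sets the Enable Relaxed Ordering bit in the PCIe Device Control
 * register, allowing the device to issue TLPs with the relaxed-ordering
 * attribute for better DMA throughput on hosts that honor it.
 */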
/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through one PF */
	func = PCI_FUNC(pdev->devfn);
	if (func != ent->driver_data) {
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);
	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);
	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar0;

	if (!is_t4(adapter->chip)) {
		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment is 128B.  Write coalescing is enabled only
		 * when the number of egress queues per page programmed in
		 * SGE_EGRESS_QUEUES_PER_PAGE_PF does not exceed the number
		 * of segments that fit in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_unmap_bar0;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_unmap_bar0;
		}
	}
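	/* For example, with 4KB pages num_seg = 4096 / 128 = 32, so the
	 * check above allows at most 32 egress queues per page before
	 * refusing to enable write coalescing.
	 */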
	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}
	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;
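	/* The msi module parameter selects the interrupt type: 2 or higher
	 * tries MSI-X first, 1 skips straight to MSI, and 0 leaves legacy
	 * INTx.  If MSI-X setup fails the else-if falls back to MSI.
	 */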
	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;	/* partial registration still counts */
	}
	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;
 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->chip))
		iounmap(adapter->bar2);
 out_unmap_bar0:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];

			for (i = 0; i < (adapter->tids.nftids +
					 adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		if (!is_t4(adapter->chip))
			iounmap(adapter->bar2);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else
		pci_release_regions(pdev);
}
static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.err_handler = &cxgb4_eeh,
};
static int __init cxgb4_init_module(void)
{
	int ret;

	workq = create_singlethread_workqueue("cxgb4");
	if (!workq)
		return -ENOMEM;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0) {
		debugfs_remove(cxgb4_debugfs_root);
		destroy_workqueue(workq);	/* don't leak the workqueue */
	}
	return ret;
}
static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
	flush_workqueue(workq);
	destroy_workqueue(workq);
}
module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);