1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
44 #include <linux/if.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <net/addrconf.h>
64 #include <net/bonding.h>
65 #include <linux/uaccess.h>
66 #include <linux/crash_dump.h>
67 #include <net/udp_tunnel.h>
68 #include <net/xfrm.h>
69 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
70 #include <net/tls.h>
71 #endif
72
73 #include "cxgb4.h"
74 #include "cxgb4_filter.h"
75 #include "t4_regs.h"
76 #include "t4_values.h"
77 #include "t4_msg.h"
78 #include "t4fw_api.h"
79 #include "t4fw_version.h"
80 #include "cxgb4_dcb.h"
81 #include "srq.h"
82 #include "cxgb4_debugfs.h"
83 #include "clip_tbl.h"
84 #include "l2t.h"
85 #include "smt.h"
86 #include "sched.h"
87 #include "cxgb4_tc_u32.h"
88 #include "cxgb4_tc_flower.h"
89 #include "cxgb4_tc_mqprio.h"
90 #include "cxgb4_tc_matchall.h"
91 #include "cxgb4_ptp.h"
92 #include "cxgb4_cudbg.h"
93
94 char cxgb4_driver_name[] = KBUILD_MODNAME;
95
96 #define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
97
98 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
99                          NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
100                          NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
101
102 /* Macros needed to support the PCI Device ID Table ...
103  */
104 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
105         static const struct pci_device_id cxgb4_pci_tbl[] = {
106 #define CXGB4_UNIFIED_PF 0x4
107
108 #define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF
109
110 /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
111  * called for both.
112  */
113 #define CH_PCI_DEVICE_ID_FUNCTION2 0x0
114
115 #define CH_PCI_ID_TABLE_ENTRY(devid) \
116                 {PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}
117
118 #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
119                 { 0, } \
120         }
121
122 #include "t4_pci_id_tbl.h"
123
124 #define FW4_FNAME "cxgb4/t4fw.bin"
125 #define FW5_FNAME "cxgb4/t5fw.bin"
126 #define FW6_FNAME "cxgb4/t6fw.bin"
127 #define FW4_CFNAME "cxgb4/t4-config.txt"
128 #define FW5_CFNAME "cxgb4/t5-config.txt"
129 #define FW6_CFNAME "cxgb4/t6-config.txt"
130 #define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
131 #define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
132 #define PHY_AQ1202_DEVICEID 0x4409
133 #define PHY_BCM84834_DEVICEID 0x4486
134
135 MODULE_DESCRIPTION(DRV_DESC);
136 MODULE_AUTHOR("Chelsio Communications");
137 MODULE_LICENSE("Dual BSD/GPL");
138 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
139 MODULE_FIRMWARE(FW4_FNAME);
140 MODULE_FIRMWARE(FW5_FNAME);
141 MODULE_FIRMWARE(FW6_FNAME);
142
143 /*
144  * The driver uses the best interrupt scheme available on a platform in the
145  * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
146  * of these schemes the driver may consider as follows:
147  *
148  * msi = 2: choose from among all three options
149  * msi = 1: only consider MSI and INTx interrupts
150  * msi = 0: force INTx interrupts
151  */
152 static int msi = 2;
153
154 module_param(msi, int, 0644);
155 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
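/* Illustrative usage (not from the source): load with MSI/INTx only via
 *   modprobe cxgb4 msi=1
 * Since the parameter is 0644 it can also be written through
 * /sys/module/cxgb4/parameters/msi, though it is only consulted when
 * interrupts are next set up.
 */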
156
157 /*
158  * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
159  * offset by 2 bytes in order to have the IP headers line up on 4-byte
160  * boundaries.  This is a requirement for many architectures which will throw
161  * a machine check fault if an attempt is made to access one of the 4-byte IP
162  * header fields on a non-4-byte boundary.  It's also a major performance
163  * issue even on some architectures which allow it, like some implementations
164  * of the x86 ISA.  However, some architectures don't mind this, and for some
165  * very edge-case performance-sensitive applications (like forwarding large
166  * volumes of small packets), setting this DMA offset to 0 will decrease the
167  * number of PCI-E Bus transfers enough to measurably affect performance.
168  */
169 static int rx_dma_offset = 2;
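/* Concretely: with the default offset of 2, the 14-byte Ethernet header
 * occupies buffer bytes 2..15, so the IP header starts at byte 16, which is
 * 4-byte aligned.
 */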
170
171 /* TX Queue select used to determine what algorithm to use for selecting TX
172  * queue. Select between the kernel provided function (select_queue=0) or user
173  * cxgb_select_queue function (select_queue=1)
174  *
175  * Default: select_queue=0
176  */
177 static int select_queue;
178 module_param(select_queue, int, 0644);
179 MODULE_PARM_DESC(select_queue,
180                  "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
181
182 static struct dentry *cxgb4_debugfs_root;
183
184 LIST_HEAD(adapter_list);
185 DEFINE_MUTEX(uld_mutex);
186 LIST_HEAD(uld_list);
187
188 static int cfg_queues(struct adapter *adap);
189
190 static void link_report(struct net_device *dev)
191 {
192         if (!netif_carrier_ok(dev))
193                 netdev_info(dev, "link down\n");
194         else {
195                 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
196
197                 const char *s;
198                 const struct port_info *p = netdev_priv(dev);
199
200                 switch (p->link_cfg.speed) {
201                 case 100:
202                         s = "100Mbps";
203                         break;
204                 case 1000:
205                         s = "1Gbps";
206                         break;
207                 case 10000:
208                         s = "10Gbps";
209                         break;
210                 case 25000:
211                         s = "25Gbps";
212                         break;
213                 case 40000:
214                         s = "40Gbps";
215                         break;
216                 case 50000:
217                         s = "50Gbps";
218                         break;
219                 case 100000:
220                         s = "100Gbps";
221                         break;
222                 default:
223                         pr_info("%s: unsupported speed: %d\n",
224                                 dev->name, p->link_cfg.speed);
225                         return;
226                 }
227
228                 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
229                             fc[p->link_cfg.fc]);
230         }
231 }
232
233 #ifdef CONFIG_CHELSIO_T4_DCB
234 /* Set up/tear down Data Center Bridging Priority mapping for a net device. */
235 static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
236 {
237         struct port_info *pi = netdev_priv(dev);
238         struct adapter *adap = pi->adapter;
239         struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
240         int i;
241
242         /* We use a simple mapping of Port TX Queue Index to DCB
243          * Priority when we're enabling DCB.
244          */
245         for (i = 0; i < pi->nqsets; i++, txq++) {
246                 u32 name, value;
247                 int err;
248
249                 name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
250                         FW_PARAMS_PARAM_X_V(
251                                 FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
252                         FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
253                 value = enable ? i : 0xffffffff;
254
255                 /* Since we can be called while atomic (from "interrupt
256                  * level") we need to issue the Set Parameters Command
257                  * without sleeping (timeout < 0).
258                  */
259                 err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
260                                             &name, &value,
261                                             -FW_CMD_MAX_TIMEOUT);
262
263                 if (err)
264                         dev_err(adap->pdev_dev,
265                                 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
266                                 enable ? "set" : "unset", pi->port_id, i, -err);
267                 else
268                         txq->dcb_prio = enable ? value : 0;
269         }
270 }
271
272 int cxgb4_dcb_enabled(const struct net_device *dev)
273 {
274         struct port_info *pi = netdev_priv(dev);
275
276         if (!pi->dcb.enabled)
277                 return 0;
278
279         return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
280                 (pi->dcb.state == CXGB4_DCB_STATE_HOST));
281 }
282 #endif /* CONFIG_CHELSIO_T4_DCB */
283
284 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
285 {
286         struct net_device *dev = adapter->port[port_id];
287
288         /* Skip changes from disabled ports. */
289         if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
290                 if (link_stat)
291                         netif_carrier_on(dev);
292                 else {
293 #ifdef CONFIG_CHELSIO_T4_DCB
294                         if (cxgb4_dcb_enabled(dev)) {
295                                 cxgb4_dcb_reset(dev);
296                                 dcb_tx_queue_prio_enable(dev, false);
297                         }
298 #endif /* CONFIG_CHELSIO_T4_DCB */
299                         netif_carrier_off(dev);
300                 }
301
302                 link_report(dev);
303         }
304 }
305
306 void t4_os_portmod_changed(struct adapter *adap, int port_id)
307 {
308         static const char *mod_str[] = {
309                 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
310         };
311
312         struct net_device *dev = adap->port[port_id];
313         struct port_info *pi = netdev_priv(dev);
314
315         if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
316                 netdev_info(dev, "port module unplugged\n");
317         else if (pi->mod_type < ARRAY_SIZE(mod_str))
318                 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
319         else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
320                 netdev_info(dev, "%s: unsupported port module inserted\n",
321                             dev->name);
322         else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
323                 netdev_info(dev, "%s: unknown port module inserted\n",
324                             dev->name);
325         else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
326                 netdev_info(dev, "%s: transceiver module error\n", dev->name);
327         else
328                 netdev_info(dev, "%s: unknown module type %d inserted\n",
329                             dev->name, pi->mod_type);
330
331         /* If the interface is running, then we'll need to redo any "sticky"
332          * Link Parameters for the new Transceiver Module.
333          */
334         pi->link_cfg.redo_l1cfg = netif_running(dev);
335 }
336
337 int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
338 module_param(dbfifo_int_thresh, int, 0644);
339 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
340
341 /*
342  * usecs to sleep while draining the dbfifo
343  */
344 static int dbfifo_drain_delay = 1000;
345 module_param(dbfifo_drain_delay, int, 0644);
346 MODULE_PARM_DESC(dbfifo_drain_delay,
347                  "usecs to sleep while draining the dbfifo");
348
349 static inline int cxgb4_set_addr_hash(struct port_info *pi)
350 {
351         struct adapter *adap = pi->adapter;
352         u64 vec = 0;
353         bool ucast = false;
354         struct hash_mac_addr *entry;
355
356         /* Calculate the hash vector for the updated list and program it */
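        /* Note (an assumption from the 1ULL << shift below): hash_mac_addr()
         * folds the 48-bit MAC into a value below 64, so @vec acts as a
         * 64-entry bitmap with one bit per hardware hash bucket.
         */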
357         list_for_each_entry(entry, &adap->mac_hlist, list) {
358                 ucast |= is_unicast_ether_addr(entry->addr);
359                 vec |= (1ULL << hash_mac_addr(entry->addr));
360         }
361         return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
362                                 vec, false);
363 }
364
365 static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
366 {
367         struct port_info *pi = netdev_priv(netdev);
368         struct adapter *adap = pi->adapter;
369         int ret;
370         u64 mhash = 0;
371         u64 uhash = 0;
372         /* idx stores the indices of the allocated filters; its size
373          * should grow with the number of MAC addresses we allocate
374          * filters for.
375          */
376
377         u16 idx[1] = {};
378         bool free = false;
379         bool ucast = is_unicast_ether_addr(mac_addr);
380         const u8 *maclist[1] = {mac_addr};
381         struct hash_mac_addr *new_entry;
382
383         ret = cxgb4_alloc_mac_filt(adap, pi->viid, free, 1, maclist,
384                                    idx, ucast ? &uhash : &mhash, false);
385         if (ret < 0)
386                 goto out;
387         /* If the hash is non-zero, add the address to the hash addr
388          * list so that at the end we can calculate the hash for the
389          * whole list and program it.
390          */
391         if (uhash || mhash) {
392                 new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
393                 if (!new_entry)
394                         return -ENOMEM;
395                 ether_addr_copy(new_entry->addr, mac_addr);
396                 list_add_tail(&new_entry->list, &adap->mac_hlist);
397                 ret = cxgb4_set_addr_hash(pi);
398         }
399 out:
400         return ret < 0 ? ret : 0;
401 }
402
403 static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
404 {
405         struct port_info *pi = netdev_priv(netdev);
406         struct adapter *adap = pi->adapter;
407         int ret;
408         const u8 *maclist[1] = {mac_addr};
409         struct hash_mac_addr *entry, *tmp;
410
411         /* If the MAC address to be removed is in the hash addr
412          * list, delete it from the list and update hash vector
413          */
414         list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
415                 if (ether_addr_equal(entry->addr, mac_addr)) {
416                         list_del(&entry->list);
417                         kfree(entry);
418                         return cxgb4_set_addr_hash(pi);
419                 }
420         }
421
422         ret = cxgb4_free_mac_filt(adap, pi->viid, 1, maclist, false);
423         return ret < 0 ? -EINVAL : 0;
424 }
425
426 /*
427  * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
428  * If @mtu is -1 it is left unchanged.
429  */
430 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
431 {
432         struct port_info *pi = netdev_priv(dev);
433         struct adapter *adapter = pi->adapter;
434
435         __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
436         __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
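        /* __dev_uc_sync()/__dev_mc_sync() walk the device's unicast and
         * multicast address lists and invoke cxgb4_mac_sync() /
         * cxgb4_mac_unsync() above for each address added or removed since
         * the last synchronization.
         */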
437
438         return t4_set_rxmode(adapter, adapter->mbox, pi->viid, pi->viid_mirror,
439                              mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
440                              (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
441                              sleep_ok);
442 }
443
444 /**
445  *      cxgb4_change_mac - Update match filter for a MAC address.
446  *      @pi: the port_info
447  *      @viid: the VI id
448  *      @tcam_idx: TCAM index of existing filter for old value of MAC address,
449  *                 or -1
450  *      @addr: the new MAC address value
451  *      @persist: whether a new MAC allocation should be persistent
452  *      @smt_idx: the destination to store the new SMT index.
453  *
454  *      Modifies an MPS filter and sets it to the new MAC address if
455  *      @tcam_idx >= 0, or adds the MAC address to a new filter if
456  *      @tcam_idx < 0. In the latter case the address is added persistently
457  *      if @persist is %true.
458  *      Addresses are programmed to the hash region if the TCAM runs out
459  *      of entries.
460  */
461 int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
462                      int *tcam_idx, const u8 *addr, bool persist,
463                      u8 *smt_idx)
464 {
465         struct adapter *adapter = pi->adapter;
466         struct hash_mac_addr *entry, *new_entry;
467         int ret;
468
469         ret = t4_change_mac(adapter, adapter->mbox, viid,
470                             *tcam_idx, addr, persist, smt_idx);
471         /* We ran out of TCAM entries. Try programming the hash region. */
472         if (ret == -ENOMEM) {
473                 /* If the interface MAC address is already in the hash addr
474                  * list, update the existing entry in place.
475                  */
476                 list_for_each_entry(entry, &adapter->mac_hlist, list) {
477                         if (entry->iface_mac) {
478                                 ether_addr_copy(entry->addr, addr);
479                                 goto set_hash;
480                         }
481                 }
482                 new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
483                 if (!new_entry)
484                         return -ENOMEM;
485                 ether_addr_copy(new_entry->addr, addr);
486                 new_entry->iface_mac = true;
487                 list_add_tail(&new_entry->list, &adapter->mac_hlist);
488 set_hash:
489                 ret = cxgb4_set_addr_hash(pi);
490         } else if (ret >= 0) {
491                 *tcam_idx = ret;
492                 ret = 0;
493         }
494
495         return ret;
496 }
497
498 /*
499  *      link_start - enable a port
500  *      @dev: the port to enable
501  *
502  *      Performs the MAC and PHY actions needed to enable a port.
503  */
504 static int link_start(struct net_device *dev)
505 {
506         struct port_info *pi = netdev_priv(dev);
507         unsigned int mb = pi->adapter->mbox;
508         int ret;
509
510         /*
511          * We do not set address filters and promiscuity here; the stack does
512          * that step explicitly.
513          */
514         ret = t4_set_rxmode(pi->adapter, mb, pi->viid, pi->viid_mirror,
515                             dev->mtu, -1, -1, -1,
516                             !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
517         if (ret == 0)
518                 ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
519                                             dev->dev_addr, true, &pi->smt_idx);
520         if (ret == 0)
521                 ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
522                                     &pi->link_cfg);
523         if (ret == 0) {
524                 local_bh_disable();
525                 ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
526                                           true, CXGB4_DCB_ENABLED);
527                 local_bh_enable();
528         }
529
530         return ret;
531 }
532
533 #ifdef CONFIG_CHELSIO_T4_DCB
534 /* Handle a Data Center Bridging update message from the firmware. */
535 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
536 {
537         int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
538         struct net_device *dev = adap->port[adap->chan_map[port]];
539         int old_dcb_enabled = cxgb4_dcb_enabled(dev);
540         int new_dcb_enabled;
541
542         cxgb4_dcb_handle_fw_update(adap, pcmd);
543         new_dcb_enabled = cxgb4_dcb_enabled(dev);
544
545         /* If the DCB has become enabled or disabled on the port then we're
546          * going to need to set up/tear down DCB Priority parameters for the
547          * TX Queues associated with the port.
548          */
549         if (new_dcb_enabled != old_dcb_enabled)
550                 dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
551 }
552 #endif /* CONFIG_CHELSIO_T4_DCB */
553
554 /* Response queue handler for the FW event queue.
555  */
556 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
557                           const struct pkt_gl *gl)
558 {
559         u8 opcode = ((const struct rss_header *)rsp)->opcode;
560
561         rsp++;                                          /* skip RSS header */
562
563         /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
564          */
565         if (unlikely(opcode == CPL_FW4_MSG &&
566            ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
567                 rsp++;
568                 opcode = ((const struct rss_header *)rsp)->opcode;
569                 rsp++;
570                 if (opcode != CPL_SGE_EGR_UPDATE) {
571                         dev_err(q->adap->pdev_dev,
572                                 "unexpected FW4/CPL %#x on FW event queue\n", opcode);
573                         goto out;
574                 }
575         }
576
577         if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
578                 const struct cpl_sge_egr_update *p = (void *)rsp;
579                 unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
580                 struct sge_txq *txq;
581
582                 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
583                 txq->restarts++;
584                 if (txq->q_type == CXGB4_TXQ_ETH) {
585                         struct sge_eth_txq *eq;
586
587                         eq = container_of(txq, struct sge_eth_txq, q);
588                         t4_sge_eth_txq_egress_update(q->adap, eq, -1);
589                 } else {
590                         struct sge_uld_txq *oq;
591
592                         oq = container_of(txq, struct sge_uld_txq, q);
593                         tasklet_schedule(&oq->qresume_tsk);
594                 }
595         } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
596                 const struct cpl_fw6_msg *p = (void *)rsp;
597
598 #ifdef CONFIG_CHELSIO_T4_DCB
599                 const struct fw_port_cmd *pcmd = (const void *)p->data;
600                 unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
601                 unsigned int action =
602                         FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));
603
604                 if (cmd == FW_PORT_CMD &&
605                     (action == FW_PORT_ACTION_GET_PORT_INFO ||
606                      action == FW_PORT_ACTION_GET_PORT_INFO32)) {
607                         int port = FW_PORT_CMD_PORTID_G(
608                                         be32_to_cpu(pcmd->op_to_portid));
609                         struct net_device *dev;
610                         int dcbxdis, state_input;
611
612                         dev = q->adap->port[q->adap->chan_map[port]];
613                         dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
614                           ? !!(pcmd->u.info.dcbxdis_pkd & FW_PORT_CMD_DCBXDIS_F)
615                           : !!(be32_to_cpu(pcmd->u.info32.lstatus32_to_cbllen32)
616                                & FW_PORT_CMD_DCBXDIS32_F));
617                         state_input = (dcbxdis
618                                        ? CXGB4_DCB_INPUT_FW_DISABLED
619                                        : CXGB4_DCB_INPUT_FW_ENABLED);
620
621                         cxgb4_dcb_state_fsm(dev, state_input);
622                 }
623
624                 if (cmd == FW_PORT_CMD &&
625                     action == FW_PORT_ACTION_L2_DCB_CFG)
626                         dcb_rpl(q->adap, pcmd);
627                 else
628 #endif
629                         if (p->type == 0)
630                                 t4_handle_fw_rpl(q->adap, p->data);
631         } else if (opcode == CPL_L2T_WRITE_RPL) {
632                 const struct cpl_l2t_write_rpl *p = (void *)rsp;
633
634                 do_l2t_write_rpl(q->adap, p);
635         } else if (opcode == CPL_SMT_WRITE_RPL) {
636                 const struct cpl_smt_write_rpl *p = (void *)rsp;
637
638                 do_smt_write_rpl(q->adap, p);
639         } else if (opcode == CPL_SET_TCB_RPL) {
640                 const struct cpl_set_tcb_rpl *p = (void *)rsp;
641
642                 filter_rpl(q->adap, p);
643         } else if (opcode == CPL_ACT_OPEN_RPL) {
644                 const struct cpl_act_open_rpl *p = (void *)rsp;
645
646                 hash_filter_rpl(q->adap, p);
647         } else if (opcode == CPL_ABORT_RPL_RSS) {
648                 const struct cpl_abort_rpl_rss *p = (void *)rsp;
649
650                 hash_del_filter_rpl(q->adap, p);
651         } else if (opcode == CPL_SRQ_TABLE_RPL) {
652                 const struct cpl_srq_table_rpl *p = (void *)rsp;
653
654                 do_srq_table_rpl(q->adap, p);
655         } else
656                 dev_err(q->adap->pdev_dev,
657                         "unexpected CPL %#x on FW event queue\n", opcode);
658 out:
659         return 0;
660 }
661
662 static void disable_msi(struct adapter *adapter)
663 {
664         if (adapter->flags & CXGB4_USING_MSIX) {
665                 pci_disable_msix(adapter->pdev);
666                 adapter->flags &= ~CXGB4_USING_MSIX;
667         } else if (adapter->flags & CXGB4_USING_MSI) {
668                 pci_disable_msi(adapter->pdev);
669                 adapter->flags &= ~CXGB4_USING_MSI;
670         }
671 }
672
673 /*
674  * Interrupt handler for non-data events used with MSI-X.
675  */
676 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
677 {
678         struct adapter *adap = cookie;
679         u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
680
681         if (v & PFSW_F) {
682                 adap->swintr = 1;
683                 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
684         }
685         if (adap->flags & CXGB4_MASTER_PF)
686                 t4_slow_intr_handler(adap);
687         return IRQ_HANDLED;
688 }
689
690 int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
691                        cpumask_var_t *aff_mask, int idx)
692 {
693         int rv;
694
695         if (!zalloc_cpumask_var(aff_mask, GFP_KERNEL)) {
696                 dev_err(adap->pdev_dev, "alloc_cpumask_var failed\n");
697                 return -ENOMEM;
698         }
699
700         cpumask_set_cpu(cpumask_local_spread(idx, dev_to_node(adap->pdev_dev)),
701                         *aff_mask);
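        /* cpumask_local_spread() returns the @idx-th CPU, preferring CPUs on
         * the adapter's own NUMA node, so successive queue vectors are spread
         * across device-local CPUs first.
         */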
702
703         rv = irq_set_affinity_hint(vec, *aff_mask);
704         if (rv)
705                 dev_warn(adap->pdev_dev,
706                          "irq_set_affinity_hint %u failed %d\n",
707                          vec, rv);
708
709         return 0;
710 }
711
712 void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask)
713 {
714         irq_set_affinity_hint(vec, NULL);
715         free_cpumask_var(aff_mask);
716 }
717
718 static int request_msix_queue_irqs(struct adapter *adap)
719 {
720         struct sge *s = &adap->sge;
721         struct msix_info *minfo;
722         int err, ethqidx;
723
724         if (s->fwevtq_msix_idx < 0)
725                 return -ENOMEM;
726
727         err = request_irq(adap->msix_info[s->fwevtq_msix_idx].vec,
728                           t4_sge_intr_msix, 0,
729                           adap->msix_info[s->fwevtq_msix_idx].desc,
730                           &s->fw_evtq);
731         if (err)
732                 return err;
733
734         for_each_ethrxq(s, ethqidx) {
735                 minfo = s->ethrxq[ethqidx].msix;
736                 err = request_irq(minfo->vec,
737                                   t4_sge_intr_msix, 0,
738                                   minfo->desc,
739                                   &s->ethrxq[ethqidx].rspq);
740                 if (err)
741                         goto unwind;
742
743                 cxgb4_set_msix_aff(adap, minfo->vec,
744                                    &minfo->aff_mask, ethqidx);
745         }
746         return 0;
747
748 unwind:
749         while (--ethqidx >= 0) {
750                 minfo = s->ethrxq[ethqidx].msix;
751                 cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
752                 free_irq(minfo->vec, &s->ethrxq[ethqidx].rspq);
753         }
754         free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
755         return err;
756 }
757
758 static void free_msix_queue_irqs(struct adapter *adap)
759 {
760         struct sge *s = &adap->sge;
761         struct msix_info *minfo;
762         int i;
763
764         free_irq(adap->msix_info[s->fwevtq_msix_idx].vec, &s->fw_evtq);
765         for_each_ethrxq(s, i) {
766                 minfo = s->ethrxq[i].msix;
767                 cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
768                 free_irq(minfo->vec, &s->ethrxq[i].rspq);
769         }
770 }
771
772 static int setup_ppod_edram(struct adapter *adap)
773 {
774         unsigned int param, val;
775         int ret;
776
777         /* The driver sends a FW_PARAMS_PARAM_DEV_PPOD_EDRAM read command
778          * to check whether the firmware supports the PPOD eDRAM feature.
779          * If the firmware returns 1, the driver can enable the feature by
780          * sending a FW_PARAMS_PARAM_DEV_PPOD_EDRAM write command with
781          * value 1.
782          */
783         param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
784                 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PPOD_EDRAM));
785
786         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
787         if (ret < 0) {
788                 dev_warn(adap->pdev_dev,
789                          "querying PPOD_EDRAM support failed: %d\n",
790                          ret);
791                 return -1;
792         }
793
794         if (val != 1)
795                 return -1;
796
797         ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
798         if (ret < 0) {
799                 dev_err(adap->pdev_dev,
800                         "setting PPOD_EDRAM failed: %d\n", ret);
801                 return -1;
802         }
803         return 0;
804 }
805
806 static void adap_config_hpfilter(struct adapter *adapter)
807 {
808         u32 param, val = 0;
809         int ret;
810
811         /* Enable the HP filter region. Older firmware will fail this
812          * request, which is fine.
813          */
814         param = FW_PARAM_DEV(HPFILTER_REGION_SUPPORT);
815         ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
816                             1, &param, &val);
817
818         /* An error means the FW doesn't know about HP filter support;
819          * it's not a problem, so don't return an error.
820          */
821         if (ret < 0)
822                 dev_err(adapter->pdev_dev,
823                         "HP filter region isn't supported by FW\n");
824 }
825
826 static int cxgb4_config_rss(const struct port_info *pi, u16 *rss,
827                             u16 rss_size, u16 viid)
828 {
829         struct adapter *adap = pi->adapter;
830         int ret;
831
832         ret = t4_config_rss_range(adap, adap->mbox, viid, 0, rss_size, rss,
833                                   rss_size);
834         if (ret)
835                 return ret;
836
837         /* If Tunnel All Lookup isn't specified in the global RSS
838          * Configuration, then we need to specify a default Ingress
839          * Queue for any ingress packets which aren't hashed.  We'll
840          * use our first ingress queue ...
841          */
842         return t4_config_vi_rss(adap, adap->mbox, viid,
843                                 FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
844                                 FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
845                                 FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
846                                 FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
847                                 FW_RSS_VI_CONFIG_CMD_UDPEN_F,
848                                 rss[0]);
849 }
850
851 /**
852  *      cxgb4_write_rss - write the RSS table for a given port
853  *      @pi: the port
854  *      @queues: array of queue indices for RSS
855  *
856  *      Sets up the portion of the HW RSS table for the port's VI to distribute
857  *      packets to the Rx queues in @queues.
858  *      Should never be called before the SGE Ethernet Rx queues are set up.
859  */
860 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
861 {
862         struct adapter *adapter = pi->adapter;
863         const struct sge_eth_rxq *rxq;
864         int i, err;
865         u16 *rss;
866
867         rxq = &adapter->sge.ethrxq[pi->first_qset];
868         rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
869         if (!rss)
870                 return -ENOMEM;
871
872         /* map the queue indices to queue ids */
873         for (i = 0; i < pi->rss_size; i++, queues++)
874                 rss[i] = rxq[*queues].rspq.abs_id;
875
876         err = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid);
877         kfree(rss);
878         return err;
879 }
880
881 /**
882  *      setup_rss - configure RSS
883  *      @adap: the adapter
884  *
885  *      Sets up RSS for each port.
886  */
887 static int setup_rss(struct adapter *adap)
888 {
889         int i, j, err;
890
891         for_each_port(adap, i) {
892                 const struct port_info *pi = adap2pinfo(adap, i);
893
894                 /* Fill default values with equal distribution */
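                /* (e.g. rss_size == 8 and nqsets == 3 yields the indirection
                 * table {0, 1, 2, 0, 1, 2, 0, 1}; illustrative values only.)
                 */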
895                 for (j = 0; j < pi->rss_size; j++)
896                         pi->rss[j] = j % pi->nqsets;
897
898                 err = cxgb4_write_rss(pi, pi->rss);
899                 if (err)
900                         return err;
901         }
902         return 0;
903 }
904
905 /*
906  * Return the channel of the ingress queue with the given qid.
907  */
908 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
909 {
910         qid -= p->ingr_start;
911         return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
912 }
913
914 void cxgb4_quiesce_rx(struct sge_rspq *q)
915 {
916         if (q->handler)
917                 napi_disable(&q->napi);
918 }
919
920 /*
921  * Wait until all NAPI handlers are descheduled.
922  */
923 static void quiesce_rx(struct adapter *adap)
924 {
925         int i;
926
927         for (i = 0; i < adap->sge.ingr_sz; i++) {
928                 struct sge_rspq *q = adap->sge.ingr_map[i];
929
930                 if (!q)
931                         continue;
932
933                 cxgb4_quiesce_rx(q);
934         }
935 }
936
937 /* Disable interrupt and napi handler */
938 static void disable_interrupts(struct adapter *adap)
939 {
940         struct sge *s = &adap->sge;
941
942         if (adap->flags & CXGB4_FULL_INIT_DONE) {
943                 t4_intr_disable(adap);
944                 if (adap->flags & CXGB4_USING_MSIX) {
945                         free_msix_queue_irqs(adap);
946                         free_irq(adap->msix_info[s->nd_msix_idx].vec,
947                                  adap);
948                 } else {
949                         free_irq(adap->pdev->irq, adap);
950                 }
951                 quiesce_rx(adap);
952         }
953 }
954
955 void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q)
956 {
957         if (q->handler)
958                 napi_enable(&q->napi);
959
960         /* 0-increment GTS to start the timer and enable interrupts */
961         t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
962                      SEINTARM_V(q->intr_params) |
963                      INGRESSQID_V(q->cntxt_id));
964 }
965
966 /*
967  * Enable NAPI scheduling and interrupt generation for all Rx queues.
968  */
969 static void enable_rx(struct adapter *adap)
970 {
971         int i;
972
973         for (i = 0; i < adap->sge.ingr_sz; i++) {
974                 struct sge_rspq *q = adap->sge.ingr_map[i];
975
976                 if (!q)
977                         continue;
978
979                 cxgb4_enable_rx(adap, q);
980         }
981 }
982
983 static int setup_non_data_intr(struct adapter *adap)
984 {
985         int msix;
986
987         adap->sge.nd_msix_idx = -1;
988         if (!(adap->flags & CXGB4_USING_MSIX))
989                 return 0;
990
991         /* Request MSI-X vector for non-data interrupt */
992         msix = cxgb4_get_msix_idx_from_bmap(adap);
993         if (msix < 0)
994                 return -ENOMEM;
995
996         snprintf(adap->msix_info[msix].desc,
997                  sizeof(adap->msix_info[msix].desc),
998                  "%s", adap->port[0]->name);
999
1000         adap->sge.nd_msix_idx = msix;
1001         return 0;
1002 }
1003
1004 static int setup_fw_sge_queues(struct adapter *adap)
1005 {
1006         struct sge *s = &adap->sge;
1007         int msix, err = 0;
1008
1009         bitmap_zero(s->starving_fl, s->egr_sz);
1010         bitmap_zero(s->txq_maperr, s->egr_sz);
1011
1012         if (adap->flags & CXGB4_USING_MSIX) {
1013                 s->fwevtq_msix_idx = -1;
1014                 msix = cxgb4_get_msix_idx_from_bmap(adap);
1015                 if (msix < 0)
1016                         return -ENOMEM;
1017
1018                 snprintf(adap->msix_info[msix].desc,
1019                          sizeof(adap->msix_info[msix].desc),
1020                          "%s-FWeventq", adap->port[0]->name);
1021         } else {
1022                 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
1023                                        NULL, NULL, NULL, -1);
1024                 if (err)
1025                         return err;
1026                 msix = -((int)s->intrq.abs_id + 1);
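                /* The negative value above signals "no dedicated MSI-X
                 * vector"; t4_sge_alloc_rxq() appears to decode -msix - 1 as
                 * the absolute id of the interrupt queue that completions are
                 * forwarded to (an assumption based on this encoding).
                 */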
1027         }
1028
1029         err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1030                                msix, NULL, fwevtq_handler, NULL, -1);
1031         if (err && msix >= 0)
1032                 cxgb4_free_msix_idx_in_bmap(adap, msix);
1033
1034         s->fwevtq_msix_idx = msix;
1035         return err;
1036 }
1037
1038 /**
1039  *      setup_sge_queues - configure SGE Tx/Rx/response queues
1040  *      @adap: the adapter
1041  *
1042  *      Determines how many sets of SGE queues to use and initializes them.
1043  *      We support multiple queue sets per port if we have MSI-X, otherwise
1044  *      just one queue set per port.
1045  */
1046 static int setup_sge_queues(struct adapter *adap)
1047 {
1048         struct sge_uld_rxq_info *rxq_info = NULL;
1049         struct sge *s = &adap->sge;
1050         unsigned int cmplqid = 0;
1051         int err, i, j, msix = 0;
1052
1053         if (is_uld(adap))
1054                 rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];
1055
1056         if (!(adap->flags & CXGB4_USING_MSIX))
1057                 msix = -((int)s->intrq.abs_id + 1);
1058
1059         for_each_port(adap, i) {
1060                 struct net_device *dev = adap->port[i];
1061                 struct port_info *pi = netdev_priv(dev);
1062                 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1063                 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1064
1065                 for (j = 0; j < pi->nqsets; j++, q++) {
1066                         if (msix >= 0) {
1067                                 msix = cxgb4_get_msix_idx_from_bmap(adap);
1068                                 if (msix < 0) {
1069                                         err = msix;
1070                                         goto freeout;
1071                                 }
1072
1073                                 snprintf(adap->msix_info[msix].desc,
1074                                          sizeof(adap->msix_info[msix].desc),
1075                                          "%s-Rx%d", dev->name, j);
1076                                 q->msix = &adap->msix_info[msix];
1077                         }
1078
1079                         err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1080                                                msix, &q->fl,
1081                                                t4_ethrx_handler,
1082                                                NULL,
1083                                                t4_get_tp_ch_map(adap,
1084                                                                 pi->tx_chan));
1085                         if (err)
1086                                 goto freeout;
1087                         q->rspq.idx = j;
1088                         memset(&q->stats, 0, sizeof(q->stats));
1089                 }
1090
1091                 q = &s->ethrxq[pi->first_qset];
1092                 for (j = 0; j < pi->nqsets; j++, t++, q++) {
1093                         err = t4_sge_alloc_eth_txq(adap, t, dev,
1094                                         netdev_get_tx_queue(dev, j),
1095                                         q->rspq.cntxt_id,
1096                                         !!(adap->flags & CXGB4_SGE_DBQ_TIMER));
1097                         if (err)
1098                                 goto freeout;
1099                 }
1100         }
1101
1102         for_each_port(adap, i) {
1103                 /* Note that cmplqid below is 0 if we don't
1104                  * have RDMA queues, and that's the right value.
1105                  */
1106                 if (rxq_info)
1107                         cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
1108
1109                 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1110                                             s->fw_evtq.cntxt_id, cmplqid);
1111                 if (err)
1112                         goto freeout;
1113         }
1114
1115         if (!is_t4(adap->params.chip)) {
1116                 err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
1117                                            netdev_get_tx_queue(adap->port[0], 0),
1118                                            s->fw_evtq.cntxt_id, false);
1119                 if (err)
1120                         goto freeout;
1121         }
1122
1123         t4_write_reg(adap, is_t4(adap->params.chip) ?
1124                                 MPS_TRC_RSS_CONTROL_A :
1125                                 MPS_T5_TRC_RSS_CONTROL_A,
1126                      RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
1127                      QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
1128         return 0;
1129 freeout:
1130         dev_err(adap->pdev_dev, "Can't allocate queues, err=%d\n", -err);
1131         t4_free_sge_resources(adap);
1132         return err;
1133 }
1134
1135 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1136                              struct net_device *sb_dev)
1137 {
1138         int txq;
1139
1140 #ifdef CONFIG_CHELSIO_T4_DCB
1141         /* If Data Center Bridging has been successfully negotiated on this
1142          * link then we'll use the skb's priority to map it to a TX Queue.
1143          * The skb's priority is determined via the VLAN Tag Priority Code
1144          * Point field.
1145          */
1146         if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
1147                 u16 vlan_tci;
1148                 int err;
1149
1150                 err = vlan_get_tag(skb, &vlan_tci);
1151                 if (unlikely(err)) {
1152                         if (net_ratelimit())
1153                                 netdev_warn(dev,
1154                                             "TX Packet without VLAN Tag on DCB Link\n");
1155                         txq = 0;
1156                 } else {
1157                         txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1158 #ifdef CONFIG_CHELSIO_T4_FCOE
1159                         if (skb->protocol == htons(ETH_P_FCOE))
1160                                 txq = skb->priority & 0x7;
1161 #endif /* CONFIG_CHELSIO_T4_FCOE */
1162                 }
1163                 return txq;
1164         }
1165 #endif /* CONFIG_CHELSIO_T4_DCB */
1166
1167         if (dev->num_tc) {
1168                 struct port_info *pi = netdev2pinfo(dev);
1169                 u8 ver, proto;
1170
1171                 ver = ip_hdr(skb)->version;
1172                 proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr :
1173                                      ip_hdr(skb)->protocol;
1174
1175                 /* Send unsupported traffic pattern to normal NIC queues. */
1176                 txq = netdev_pick_tx(dev, skb, sb_dev);
1177                 if (xfrm_offload(skb) || is_ptp_enabled(skb, dev) ||
1178                     skb->encapsulation ||
1179                     cxgb4_is_ktls_skb(skb) ||
1180                     (proto != IPPROTO_TCP && proto != IPPROTO_UDP))
1181                         txq = txq % pi->nqsets;
1182
1183                 return txq;
1184         }
1185
1186         if (select_queue) {
1187                 txq = (skb_rx_queue_recorded(skb)
1188                         ? skb_get_rx_queue(skb)
1189                         : smp_processor_id());
1190
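                /* Fold txq back into the valid range by repeated subtraction,
                 * avoiding a full modulo operation.
                 */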
1191                 while (unlikely(txq >= dev->real_num_tx_queues))
1192                         txq -= dev->real_num_tx_queues;
1193
1194                 return txq;
1195         }
1196
1197         return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
1198 }
1199
1200 static int closest_timer(const struct sge *s, int time)
1201 {
1202         int i, delta, match = 0, min_delta = INT_MAX;
1203
1204         for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1205                 delta = time - s->timer_val[i];
1206                 if (delta < 0)
1207                         delta = -delta;
1208                 if (delta < min_delta) {
1209                         min_delta = delta;
1210                         match = i;
1211                 }
1212         }
1213         return match;
1214 }
1215
1216 static int closest_thres(const struct sge *s, int thres)
1217 {
1218         int i, delta, match = 0, min_delta = INT_MAX;
1219
1220         for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1221                 delta = thres - s->counter_val[i];
1222                 if (delta < 0)
1223                         delta = -delta;
1224                 if (delta < min_delta) {
1225                         min_delta = delta;
1226                         match = i;
1227                 }
1228         }
1229         return match;
1230 }
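/* Both helpers above do a linear nearest-value search. For illustration,
 * with hypothetical timer_val = {1, 5, 10, 50, 100, 200}, closest_timer()
 * maps time == 7 to index 1 (the 5 us entry), since |7 - 5| < |7 - 10|.
 */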
1231
1232 /**
1233  *      cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
1234  *      @q: the Rx queue
1235  *      @us: the hold-off time in us, or 0 to disable timer
1236  *      @cnt: the hold-off packet count, or 0 to disable counter
1237  *
1238  *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
1239  *      one of the two needs to be enabled for the queue to generate interrupts.
1240  */
1241 int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
1242                                unsigned int us, unsigned int cnt)
1243 {
1244         struct adapter *adap = q->adap;
1245
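        /* If both hold-off parameters are zero the queue would never raise
         * an interrupt; the check below falls back to counting every packet.
         */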
1246         if ((us | cnt) == 0)
1247                 cnt = 1;
1248
1249         if (cnt) {
1250                 int err;
1251                 u32 v, new_idx;
1252
1253                 new_idx = closest_thres(&adap->sge, cnt);
1254                 if (q->desc && q->pktcnt_idx != new_idx) {
1255                         /* the queue has already been created, update it */
1256                         v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
1257                             FW_PARAMS_PARAM_X_V(
1258                                         FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1259                             FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
1260                         err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
1261                                             &v, &new_idx);
1262                         if (err)
1263                                 return err;
1264                 }
1265                 q->pktcnt_idx = new_idx;
1266         }
1267
1268         us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1269         q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
1270         return 0;
1271 }
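/* Illustrative call of the above: cxgb4_set_rspq_intr_params(q, 5, 8)
 * requests roughly a 5 us hold-off or an 8-packet count, rounded to the
 * nearest values supported via closest_timer()/closest_thres().
 */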
1272
1273 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
1274 {
1275         netdev_features_t changed = dev->features ^ features;
1276         const struct port_info *pi = netdev_priv(dev);
1277         int err;
1278
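        /* The XOR above leaves a bit set for every feature flag that differs
         * between the current and requested feature sets; only a change in
         * VLAN RX offload needs action here.
         */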
1279         if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
1280                 return 0;
1281
1282         err = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
1283                             pi->viid_mirror, -1, -1, -1, -1,
1284                             !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
1285         if (unlikely(err))
1286                 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
1287         return err;
1288 }
1289
1290 static int setup_debugfs(struct adapter *adap)
1291 {
1292         if (IS_ERR_OR_NULL(adap->debugfs_root))
1293                 return -1;
1294
1295 #ifdef CONFIG_DEBUG_FS
1296         t4_setup_debugfs(adap);
1297 #endif
1298         return 0;
1299 }
1300
1301 static void cxgb4_port_mirror_free_rxq(struct adapter *adap,
1302                                        struct sge_eth_rxq *mirror_rxq)
1303 {
1304         if ((adap->flags & CXGB4_FULL_INIT_DONE) &&
1305             !(adap->flags & CXGB4_SHUTTING_DOWN))
1306                 cxgb4_quiesce_rx(&mirror_rxq->rspq);
1307
1308         if (adap->flags & CXGB4_USING_MSIX) {
1309                 cxgb4_clear_msix_aff(mirror_rxq->msix->vec,
1310                                      mirror_rxq->msix->aff_mask);
1311                 free_irq(mirror_rxq->msix->vec, &mirror_rxq->rspq);
1312                 cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
1313         }
1314
1315         free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
1316 }
1317
1318 static int cxgb4_port_mirror_alloc_queues(struct net_device *dev)
1319 {
1320         struct port_info *pi = netdev2pinfo(dev);
1321         struct adapter *adap = netdev2adap(dev);
1322         struct sge_eth_rxq *mirror_rxq;
1323         struct sge *s = &adap->sge;
1324         int ret = 0, msix = 0;
1325         u16 i, rxqid;
1326         u16 *rss;
1327
1328         if (!pi->vi_mirror_count)
1329                 return 0;
1330
1331         if (s->mirror_rxq[pi->port_id])
1332                 return 0;
1333
1334         mirror_rxq = kcalloc(pi->nmirrorqsets, sizeof(*mirror_rxq), GFP_KERNEL);
1335         if (!mirror_rxq)
1336                 return -ENOMEM;
1337
1338         s->mirror_rxq[pi->port_id] = mirror_rxq;
1339
1340         if (!(adap->flags & CXGB4_USING_MSIX))
1341                 msix = -((int)adap->sge.intrq.abs_id + 1);
1342
1343         for (i = 0, rxqid = 0; i < pi->nmirrorqsets; i++, rxqid++) {
1344                 mirror_rxq = &s->mirror_rxq[pi->port_id][i];
1345
1346                 /* Allocate Mirror Rxqs */
1347                 if (msix >= 0) {
1348                         msix = cxgb4_get_msix_idx_from_bmap(adap);
1349                         if (msix < 0) {
1350                                 ret = msix;
1351                                 goto out_free_queues;
1352                         }
1353
1354                         mirror_rxq->msix = &adap->msix_info[msix];
1355                         snprintf(mirror_rxq->msix->desc,
1356                                  sizeof(mirror_rxq->msix->desc),
1357                                  "%s-mirrorrxq%d", dev->name, i);
1358                 }
1359
1360                 init_rspq(adap, &mirror_rxq->rspq,
1361                           CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC,
1362                           CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT,
1363                           CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM,
1364                           CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE);
1365
1366                 mirror_rxq->fl.size = CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM;
1367
1368                 ret = t4_sge_alloc_rxq(adap, &mirror_rxq->rspq, false,
1369                                        dev, msix, &mirror_rxq->fl,
1370                                        t4_ethrx_handler, NULL, 0);
1371                 if (ret)
1372                         goto out_free_msix_idx;
1373
1374                 /* Setup MSI-X vectors for Mirror Rxqs */
1375                 if (adap->flags & CXGB4_USING_MSIX) {
1376                         ret = request_irq(mirror_rxq->msix->vec,
1377                                           t4_sge_intr_msix, 0,
1378                                           mirror_rxq->msix->desc,
1379                                           &mirror_rxq->rspq);
1380                         if (ret)
1381                                 goto out_free_rxq;
1382
1383                         cxgb4_set_msix_aff(adap, mirror_rxq->msix->vec,
1384                                            &mirror_rxq->msix->aff_mask, i);
1385                 }
1386
1387                 /* Start NAPI for Mirror Rxqs */
1388                 cxgb4_enable_rx(adap, &mirror_rxq->rspq);
1389         }
1390
1391         /* Setup RSS for Mirror Rxqs */
1392         rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
1393         if (!rss) {
1394                 ret = -ENOMEM;
1395                 goto out_free_queues;
1396         }
1397
1398         mirror_rxq = &s->mirror_rxq[pi->port_id][0];
1399         for (i = 0; i < pi->rss_size; i++)
1400                 rss[i] = mirror_rxq[i % pi->nmirrorqsets].rspq.abs_id;
1401
1402         ret = cxgb4_config_rss(pi, rss, pi->rss_size, pi->viid_mirror);
1403         kfree(rss);
1404         if (ret)
1405                 goto out_free_queues;
1406
1407         return 0;
1408
1409 out_free_rxq:
1410         free_rspq_fl(adap, &mirror_rxq->rspq, &mirror_rxq->fl);
1411
1412 out_free_msix_idx:
1413         cxgb4_free_msix_idx_in_bmap(adap, mirror_rxq->msix->idx);
1414
1415 out_free_queues:
1416         while (rxqid-- > 0)
1417                 cxgb4_port_mirror_free_rxq(adap,
1418                                            &s->mirror_rxq[pi->port_id][rxqid]);
1419
1420         kfree(s->mirror_rxq[pi->port_id]);
1421         s->mirror_rxq[pi->port_id] = NULL;
1422         return ret;
1423 }
1424
1425 static void cxgb4_port_mirror_free_queues(struct net_device *dev)
1426 {
1427         struct port_info *pi = netdev2pinfo(dev);
1428         struct adapter *adap = netdev2adap(dev);
1429         struct sge *s = &adap->sge;
1430         u16 i;
1431
1432         if (!pi->vi_mirror_count)
1433                 return;
1434
1435         if (!s->mirror_rxq[pi->port_id])
1436                 return;
1437
1438         for (i = 0; i < pi->nmirrorqsets; i++)
1439                 cxgb4_port_mirror_free_rxq(adap,
1440                                            &s->mirror_rxq[pi->port_id][i]);
1441
1442         kfree(s->mirror_rxq[pi->port_id]);
1443         s->mirror_rxq[pi->port_id] = NULL;
1444 }
1445
1446 static int cxgb4_port_mirror_start(struct net_device *dev)
1447 {
1448         struct port_info *pi = netdev2pinfo(dev);
1449         struct adapter *adap = netdev2adap(dev);
1450         int ret, idx = -1;
1451
1452         if (!pi->vi_mirror_count)
1453                 return 0;
1454
 1455         /* Mirror VIs can be created dynamically after the stack has
 1456          * already set up Rx modes like MTU, promisc, allmulti, etc.
 1457          * on the main VI. So, parse what the stack set up on the
 1458          * main VI and apply the same settings to the mirror VI.
 1459          */
1460         ret = t4_set_rxmode(adap, adap->mbox, pi->viid, pi->viid_mirror,
1461                             dev->mtu, (dev->flags & IFF_PROMISC) ? 1 : 0,
1462                             (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
1463                             !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
1464         if (ret) {
1465                 dev_err(adap->pdev_dev,
 1466                         "Failed to start Rx mode for Mirror VI 0x%x, ret: %d\n",
1467                         pi->viid_mirror, ret);
1468                 return ret;
1469         }
1470
 1471         /* Enable the replication bit for the device's MAC address
 1472          * in the MPS TCAM, so that packets for the main VI are
 1473          * also replicated to the mirror VI.
1474          */
1475         ret = cxgb4_update_mac_filt(pi, pi->viid_mirror, &idx,
1476                                     dev->dev_addr, true, NULL);
1477         if (ret) {
1478                 dev_err(adap->pdev_dev,
 1479                         "Failed to update MAC filter for Mirror VI 0x%x, ret: %d\n",
1480                         pi->viid_mirror, ret);
1481                 return ret;
1482         }
1483
1484         /* Enabling a Virtual Interface can result in an interrupt
1485          * during the processing of the VI Enable command and, in some
1486          * paths, result in an attempt to issue another command in the
 1487          * interrupt context. Thus, we disable bottom-half (softirq)
 1488          * processing for the duration of the VI Enable command ...
1489          */
1490         local_bh_disable();
1491         ret = t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, true, true,
1492                                   false);
1493         local_bh_enable();
1494         if (ret)
1495                 dev_err(adap->pdev_dev,
 1496                         "Failed to start Mirror VI 0x%x, ret: %d\n",
1497                         pi->viid_mirror, ret);
1498
1499         return ret;
1500 }
1501
1502 static void cxgb4_port_mirror_stop(struct net_device *dev)
1503 {
1504         struct port_info *pi = netdev2pinfo(dev);
1505         struct adapter *adap = netdev2adap(dev);
1506
1507         if (!pi->vi_mirror_count)
1508                 return;
1509
1510         t4_enable_vi_params(adap, adap->mbox, pi->viid_mirror, false, false,
1511                             false);
1512 }
1513
1514 int cxgb4_port_mirror_alloc(struct net_device *dev)
1515 {
1516         struct port_info *pi = netdev2pinfo(dev);
1517         struct adapter *adap = netdev2adap(dev);
1518         int ret = 0;
1519
1520         if (!pi->nmirrorqsets)
1521                 return -EOPNOTSUPP;
1522
1523         mutex_lock(&pi->vi_mirror_mutex);
1524         if (pi->viid_mirror) {
1525                 pi->vi_mirror_count++;
1526                 goto out_unlock;
1527         }
1528
1529         ret = t4_init_port_mirror(pi, adap->mbox, pi->port_id, adap->pf, 0,
1530                                   &pi->viid_mirror);
1531         if (ret)
1532                 goto out_unlock;
1533
1534         pi->vi_mirror_count = 1;
1535
1536         if (adap->flags & CXGB4_FULL_INIT_DONE) {
1537                 ret = cxgb4_port_mirror_alloc_queues(dev);
1538                 if (ret)
1539                         goto out_free_vi;
1540
1541                 ret = cxgb4_port_mirror_start(dev);
1542                 if (ret)
1543                         goto out_free_queues;
1544         }
1545
1546         mutex_unlock(&pi->vi_mirror_mutex);
1547         return 0;
1548
1549 out_free_queues:
1550         cxgb4_port_mirror_free_queues(dev);
1551
1552 out_free_vi:
1553         pi->vi_mirror_count = 0;
1554         t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
1555         pi->viid_mirror = 0;
1556
1557 out_unlock:
1558         mutex_unlock(&pi->vi_mirror_mutex);
1559         return ret;
1560 }
1561
1562 void cxgb4_port_mirror_free(struct net_device *dev)
1563 {
1564         struct port_info *pi = netdev2pinfo(dev);
1565         struct adapter *adap = netdev2adap(dev);
1566
1567         mutex_lock(&pi->vi_mirror_mutex);
1568         if (!pi->viid_mirror)
1569                 goto out_unlock;
1570
1571         if (pi->vi_mirror_count > 1) {
1572                 pi->vi_mirror_count--;
1573                 goto out_unlock;
1574         }
1575
1576         cxgb4_port_mirror_stop(dev);
1577         cxgb4_port_mirror_free_queues(dev);
1578
1579         pi->vi_mirror_count = 0;
1580         t4_free_vi(adap, adap->mbox, adap->pf, 0, pi->viid_mirror);
1581         pi->viid_mirror = 0;
1582
1583 out_unlock:
1584         mutex_unlock(&pi->vi_mirror_mutex);
1585 }
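
/* The alloc/free pair above implements lazy, refcounted ownership of the
 * mirror VI: the first cxgb4_port_mirror_alloc() creates the VI (and its
 * queues once the adapter is fully initialized), later callers only bump
 * vi_mirror_count, and teardown happens when the last user calls
 * cxgb4_port_mirror_free().  A minimal sketch of the same pattern follows;
 * the struct and helper names are hypothetical and not part of this driver.
 */
#if 0	/* illustrative sketch only */
struct lazy_res {
	struct mutex lock;
	unsigned int refcnt;		/* like pi->vi_mirror_count */
	void *handle;			/* like pi->viid_mirror */
};

static int lazy_res_get(struct lazy_res *r, void *(*create)(void))
{
	int ret = 0;

	mutex_lock(&r->lock);
	if (r->refcnt++) {		/* already created: just a new user */
		mutex_unlock(&r->lock);
		return 0;
	}
	r->handle = create();		/* first user: create the resource */
	if (!r->handle) {
		r->refcnt = 0;
		ret = -ENOMEM;
	}
	mutex_unlock(&r->lock);
	return ret;
}

static void lazy_res_put(struct lazy_res *r, void (*destroy)(void *))
{
	mutex_lock(&r->lock);
	if (r->refcnt && !--r->refcnt) {
		destroy(r->handle);	/* last user: tear everything down */
		r->handle = NULL;
	}
	mutex_unlock(&r->lock);
}
#endif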
1586
1587 /*
1588  * upper-layer driver support
1589  */
1590
1591 /*
1592  * Allocate an active-open TID and set it to the supplied value.
1593  */
1594 int cxgb4_alloc_atid(struct tid_info *t, void *data)
1595 {
1596         int atid = -1;
1597
1598         spin_lock_bh(&t->atid_lock);
1599         if (t->afree) {
1600                 union aopen_entry *p = t->afree;
1601
1602                 atid = (p - t->atid_tab) + t->atid_base;
1603                 t->afree = p->next;
1604                 p->data = data;
1605                 t->atids_in_use++;
1606         }
1607         spin_unlock_bh(&t->atid_lock);
1608         return atid;
1609 }
1610 EXPORT_SYMBOL(cxgb4_alloc_atid);
1611
1612 /*
1613  * Release an active-open TID.
1614  */
1615 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
1616 {
1617         union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
1618
1619         spin_lock_bh(&t->atid_lock);
1620         p->next = t->afree;
1621         t->afree = p;
1622         t->atids_in_use--;
1623         spin_unlock_bh(&t->atid_lock);
1624 }
1625 EXPORT_SYMBOL(cxgb4_free_atid);
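
/* cxgb4_alloc_atid()/cxgb4_free_atid() lean on union aopen_entry: while a
 * TID is free, its table slot stores the pointer to the next free slot in
 * the same bytes that hold the caller's data while it is in use, so the
 * free list needs no extra memory and both operations are O(1).  A
 * stripped-down sketch of the idiom, with hypothetical names:
 */
#if 0	/* illustrative sketch only */
union slot {
	union slot *next;	/* valid while the slot is free */
	void *data;		/* valid while the slot is allocated */
};

struct slot_pool {
	union slot *tab;	/* array of nslots entries */
	union slot *free;	/* head of the intrusive free list */
};

static int slot_alloc(struct slot_pool *p, void *data)
{
	union slot *s = p->free;

	if (!s)
		return -1;		/* pool exhausted */
	p->free = s->next;
	s->data = data;
	return s - p->tab;		/* the index doubles as the ID */
}

static void slot_free(struct slot_pool *p, int id)
{
	p->tab[id].next = p->free;	/* push the slot back on the list */
	p->free = &p->tab[id];
}
#endif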
1626
1627 /*
1628  * Allocate a server TID and set it to the supplied value.
1629  */
1630 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
1631 {
1632         int stid;
1633
1634         spin_lock_bh(&t->stid_lock);
1635         if (family == PF_INET) {
1636                 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
1637                 if (stid < t->nstids)
1638                         __set_bit(stid, t->stid_bmap);
1639                 else
1640                         stid = -1;
1641         } else {
1642                 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
1643                 if (stid < 0)
1644                         stid = -1;
1645         }
1646         if (stid >= 0) {
1647                 t->stid_tab[stid].data = data;
1648                 stid += t->stid_base;
 1649                 /* IPv6 requires a max of 520 bits, or 16 cells, in the TCAM.
 1650                  * This is equivalent to 4 TIDs. With CLIP enabled, it
 1651                  * needs only 2 TIDs.
1652                  */
1653                 if (family == PF_INET6) {
1654                         t->stids_in_use += 2;
1655                         t->v6_stids_in_use += 2;
1656                 } else {
1657                         t->stids_in_use++;
1658                 }
1659         }
1660         spin_unlock_bh(&t->stid_lock);
1661         return stid;
1662 }
1663 EXPORT_SYMBOL(cxgb4_alloc_stid);
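
/* Note the asymmetry above: an IPv4 server consumes a single bit of
 * stid_bmap, while an IPv6 server uses bitmap_find_free_region(..., 1),
 * which claims a naturally aligned block of 2^1 = 2 bits -- two
 * consecutive TIDs, matching the CLIP-enabled footprint described in the
 * comment.  A hedged sketch of the two styles against a private bitmap
 * (demo_bmap and demo_alloc_ids are hypothetical):
 */
#if 0	/* illustrative sketch only */
static DECLARE_BITMAP(demo_bmap, 64);

static int demo_alloc_ids(bool wide)
{
	int id;

	if (!wide) {
		/* one ID: find the first clear bit and mark it busy */
		id = find_first_zero_bit(demo_bmap, 64);
		if (id >= 64)
			return -1;
		__set_bit(id, demo_bmap);
	} else {
		/* 2^1 = 2 aligned IDs in one shot, as for IPv6 stids;
		 * returns a negative value when no such block is free.
		 */
		id = bitmap_find_free_region(demo_bmap, 64, 1);
	}
	return id;
}
#endif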
1664
1665 /* Allocate a server filter TID and set it to the supplied value.
1666  */
1667 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
1668 {
1669         int stid;
1670
1671         spin_lock_bh(&t->stid_lock);
1672         if (family == PF_INET) {
1673                 stid = find_next_zero_bit(t->stid_bmap,
1674                                 t->nstids + t->nsftids, t->nstids);
1675                 if (stid < (t->nstids + t->nsftids))
1676                         __set_bit(stid, t->stid_bmap);
1677                 else
1678                         stid = -1;
1679         } else {
1680                 stid = -1;
1681         }
1682         if (stid >= 0) {
1683                 t->stid_tab[stid].data = data;
1684                 stid -= t->nstids;
1685                 stid += t->sftid_base;
1686                 t->sftids_in_use++;
1687         }
1688         spin_unlock_bh(&t->stid_lock);
1689         return stid;
1690 }
1691 EXPORT_SYMBOL(cxgb4_alloc_sftid);
1692
1693 /* Release a server TID.
1694  */
1695 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
1696 {
1697         /* Is it a server filter TID? */
1698         if (t->nsftids && (stid >= t->sftid_base)) {
1699                 stid -= t->sftid_base;
1700                 stid += t->nstids;
1701         } else {
1702                 stid -= t->stid_base;
1703         }
1704
1705         spin_lock_bh(&t->stid_lock);
1706         if (family == PF_INET)
1707                 __clear_bit(stid, t->stid_bmap);
1708         else
1709                 bitmap_release_region(t->stid_bmap, stid, 1);
1710         t->stid_tab[stid].data = NULL;
1711         if (stid < t->nstids) {
1712                 if (family == PF_INET6) {
1713                         t->stids_in_use -= 2;
1714                         t->v6_stids_in_use -= 2;
1715                 } else {
1716                         t->stids_in_use--;
1717                 }
1718         } else {
1719                 t->sftids_in_use--;
1720         }
1721
1722         spin_unlock_bh(&t->stid_lock);
1723 }
1724 EXPORT_SYMBOL(cxgb4_free_stid);
1725
1726 /*
1727  * Populate a TID_RELEASE WR.  Caller must properly size the skb.
1728  */
1729 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
1730                            unsigned int tid)
1731 {
1732         struct cpl_tid_release *req;
1733
1734         set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
1735         req = __skb_put(skb, sizeof(*req));
1736         INIT_TP_WR(req, tid);
1737         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
1738 }
1739
1740 /*
1741  * Queue a TID release request and if necessary schedule a work queue to
1742  * process it.
1743  */
1744 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
1745                                     unsigned int tid)
1746 {
1747         struct adapter *adap = container_of(t, struct adapter, tids);
1748         void **p = &t->tid_tab[tid - t->tid_base];
1749
1750         spin_lock_bh(&adap->tid_release_lock);
1751         *p = adap->tid_release_head;
1752         /* Low 2 bits encode the Tx channel number */
1753         adap->tid_release_head = (void **)((uintptr_t)p | chan);
1754         if (!adap->tid_release_task_busy) {
1755                 adap->tid_release_task_busy = true;
1756                 queue_work(adap->workq, &adap->tid_release_task);
1757         }
1758         spin_unlock_bh(&adap->tid_release_lock);
1759 }
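
/* cxgb4_queue_tid_release() chains pending releases through the tid_tab
 * slots themselves and, because those slots are pointer aligned, smuggles
 * the 2-bit Tx channel number into the low bits of the link pointer;
 * process_tid_release_list() below masks it back out.  The tagging
 * arithmetic in isolation (helper names are hypothetical):
 */
#if 0	/* illustrative sketch only */
static void *tag_ptr(void **p, unsigned int chan)
{
	/* p is at least 4-byte aligned, so bits 0-1 are free for the tag */
	return (void *)((uintptr_t)p | (chan & 3));
}

static void **untag_ptr(void *tagged, unsigned int *chan)
{
	*chan = (uintptr_t)tagged & 3;
	return (void **)((uintptr_t)tagged & ~(uintptr_t)3);
}
#endif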
1760
1761 /*
1762  * Process the list of pending TID release requests.
1763  */
1764 static void process_tid_release_list(struct work_struct *work)
1765 {
1766         struct sk_buff *skb;
1767         struct adapter *adap;
1768
1769         adap = container_of(work, struct adapter, tid_release_task);
1770
1771         spin_lock_bh(&adap->tid_release_lock);
1772         while (adap->tid_release_head) {
1773                 void **p = adap->tid_release_head;
1774                 unsigned int chan = (uintptr_t)p & 3;
1775                 p = (void *)p - chan;
1776
1777                 adap->tid_release_head = *p;
1778                 *p = NULL;
1779                 spin_unlock_bh(&adap->tid_release_lock);
1780
1781                 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
1782                                          GFP_KERNEL)))
1783                         schedule_timeout_uninterruptible(1);
1784
1785                 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
1786                 t4_ofld_send(adap, skb);
1787                 spin_lock_bh(&adap->tid_release_lock);
1788         }
1789         adap->tid_release_task_busy = false;
1790         spin_unlock_bh(&adap->tid_release_lock);
1791 }
1792
1793 /*
1794  * Release a TID and inform HW.  If we are unable to allocate the release
1795  * message we defer to a work queue.
1796  */
1797 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
1798                       unsigned short family)
1799 {
1800         struct adapter *adap = container_of(t, struct adapter, tids);
1801         struct sk_buff *skb;
1802
1803         WARN_ON(tid_out_of_range(&adap->tids, tid));
1804
1805         if (t->tid_tab[tid - adap->tids.tid_base]) {
1806                 t->tid_tab[tid - adap->tids.tid_base] = NULL;
1807                 atomic_dec(&t->conns_in_use);
1808                 if (t->hash_base && (tid >= t->hash_base)) {
1809                         if (family == AF_INET6)
1810                                 atomic_sub(2, &t->hash_tids_in_use);
1811                         else
1812                                 atomic_dec(&t->hash_tids_in_use);
1813                 } else {
1814                         if (family == AF_INET6)
1815                                 atomic_sub(2, &t->tids_in_use);
1816                         else
1817                                 atomic_dec(&t->tids_in_use);
1818                 }
1819         }
1820
1821         skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
1822         if (likely(skb)) {
1823                 mk_tid_release(skb, chan, tid);
1824                 t4_ofld_send(adap, skb);
 1825         } else {
 1826                 cxgb4_queue_tid_release(t, chan, tid);
             }
1827 }
1828 EXPORT_SYMBOL(cxgb4_remove_tid);
1829
1830 /*
1831  * Allocate and initialize the TID tables.  Returns 0 on success.
1832  */
1833 static int tid_init(struct tid_info *t)
1834 {
1835         struct adapter *adap = container_of(t, struct adapter, tids);
1836         unsigned int max_ftids = t->nftids + t->nsftids;
1837         unsigned int natids = t->natids;
1838         unsigned int hpftid_bmap_size;
1839         unsigned int eotid_bmap_size;
1840         unsigned int stid_bmap_size;
1841         unsigned int ftid_bmap_size;
1842         size_t size;
1843
1844         stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
1845         ftid_bmap_size = BITS_TO_LONGS(t->nftids);
1846         hpftid_bmap_size = BITS_TO_LONGS(t->nhpftids);
1847         eotid_bmap_size = BITS_TO_LONGS(t->neotids);
1848         size = t->ntids * sizeof(*t->tid_tab) +
1849                natids * sizeof(*t->atid_tab) +
1850                t->nstids * sizeof(*t->stid_tab) +
1851                t->nsftids * sizeof(*t->stid_tab) +
1852                stid_bmap_size * sizeof(long) +
1853                t->nhpftids * sizeof(*t->hpftid_tab) +
1854                hpftid_bmap_size * sizeof(long) +
1855                max_ftids * sizeof(*t->ftid_tab) +
1856                ftid_bmap_size * sizeof(long) +
1857                t->neotids * sizeof(*t->eotid_tab) +
1858                eotid_bmap_size * sizeof(long);
1859
1860         t->tid_tab = kvzalloc(size, GFP_KERNEL);
1861         if (!t->tid_tab)
1862                 return -ENOMEM;
1863
1864         t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
1865         t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
1866         t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
1867         t->hpftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
1868         t->hpftid_bmap = (unsigned long *)&t->hpftid_tab[t->nhpftids];
1869         t->ftid_tab = (struct filter_entry *)&t->hpftid_bmap[hpftid_bmap_size];
1870         t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
1871         t->eotid_tab = (struct eotid_entry *)&t->ftid_bmap[ftid_bmap_size];
1872         t->eotid_bmap = (unsigned long *)&t->eotid_tab[t->neotids];
1873         spin_lock_init(&t->stid_lock);
1874         spin_lock_init(&t->atid_lock);
1875         spin_lock_init(&t->ftid_lock);
1876
1877         t->stids_in_use = 0;
1878         t->v6_stids_in_use = 0;
1879         t->sftids_in_use = 0;
1880         t->afree = NULL;
1881         t->atids_in_use = 0;
1882         atomic_set(&t->tids_in_use, 0);
1883         atomic_set(&t->conns_in_use, 0);
1884         atomic_set(&t->hash_tids_in_use, 0);
1885         atomic_set(&t->eotids_in_use, 0);
1886
1887         /* Setup the free list for atid_tab and clear the stid bitmap. */
1888         if (natids) {
1889                 while (--natids)
1890                         t->atid_tab[natids - 1].next = &t->atid_tab[natids];
1891                 t->afree = t->atid_tab;
1892         }
1893
1894         if (is_offload(adap)) {
1895                 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
1896                 /* Reserve stid 0 for T4/T5 adapters */
1897                 if (!t->stid_base &&
1898                     CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
1899                         __set_bit(0, t->stid_bmap);
1900
1901                 if (t->neotids)
1902                         bitmap_zero(t->eotid_bmap, t->neotids);
1903         }
1904
1905         if (t->nhpftids)
1906                 bitmap_zero(t->hpftid_bmap, t->nhpftids);
1907         bitmap_zero(t->ftid_bmap, t->nftids);
1908         return 0;
1909 }
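
/* tid_init() sizes a single kvzalloc() for every table and bitmap it
 * needs, then carves typed pointers out of the flat buffer in layout
 * order, so all TID state lives in one allocation that is released with
 * one kvfree().  The carving idiom in miniature (struct carved and its
 * fields are hypothetical); carving in descending alignment keeps each
 * sub-array naturally aligned:
 */
#if 0	/* illustrative sketch only */
struct carved {
	u64 *b;			/* nb entries */
	unsigned long *bmap;	/* nb bits */
	u32 *a;			/* na entries */
};

static int carve_init(struct carved *c, unsigned int na, unsigned int nb)
{
	size_t size = nb * sizeof(*c->b) +
		      BITS_TO_LONGS(nb) * sizeof(long) +
		      na * sizeof(*c->a);

	c->b = kvzalloc(size, GFP_KERNEL);
	if (!c->b)
		return -ENOMEM;

	/* each table starts where the previous one ends */
	c->bmap = (unsigned long *)&c->b[nb];
	c->a = (u32 *)&c->bmap[BITS_TO_LONGS(nb)];
	return 0;		/* kvfree(c->b) releases everything */
}
#endif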
1910
1911 /**
1912  *      cxgb4_create_server - create an IP server
1913  *      @dev: the device
1914  *      @stid: the server TID
1915  *      @sip: local IP address to bind server to
1916  *      @sport: the server's TCP port
1917  *      @vlan: the VLAN header information
1918  *      @queue: queue to direct messages from this server to
1919  *
1920  *      Create an IP server for the given port and address.
1921  *      Returns <0 on error and one of the %NET_XMIT_* values on success.
1922  */
1923 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
1924                         __be32 sip, __be16 sport, __be16 vlan,
1925                         unsigned int queue)
1926 {
1927         unsigned int chan;
1928         struct sk_buff *skb;
1929         struct adapter *adap;
1930         struct cpl_pass_open_req *req;
1931         int ret;
1932
1933         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1934         if (!skb)
1935                 return -ENOMEM;
1936
1937         adap = netdev2adap(dev);
1938         req = __skb_put(skb, sizeof(*req));
1939         INIT_TP_WR(req, 0);
1940         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
1941         req->local_port = sport;
1942         req->peer_port = htons(0);
1943         req->local_ip = sip;
1944         req->peer_ip = htonl(0);
1945         chan = rxq_to_chan(&adap->sge, queue);
1946         req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1947         req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1948                                 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1949         ret = t4_mgmt_tx(adap, skb);
1950         return net_xmit_eval(ret);
1951 }
1952 EXPORT_SYMBOL(cxgb4_create_server);
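
/* A hedged usage sketch: bind an offloaded IPv4 listener on 10.0.0.1:8000
 * and steer its CPL messages to Rx queue 0.  demo_listen is hypothetical;
 * in real callers the stid comes from cxgb4_alloc_stid() and the address
 * from the bound socket.
 */
#if 0	/* illustrative sketch only */
static int demo_listen(struct net_device *dev, unsigned int stid)
{
	/* returns a NET_XMIT_* value on success, negative on error */
	return cxgb4_create_server(dev, stid, htonl(0x0a000001) /* 10.0.0.1 */,
				   htons(8000), 0, 0);
}
#endif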
1953
 1954 /**
      *      cxgb4_create_server6 - create an IPv6 server
1955  *      @dev: the device
1956  *      @stid: the server TID
1957  *      @sip: local IPv6 address to bind server to
1958  *      @sport: the server's TCP port
1959  *      @queue: queue to direct messages from this server to
1960  *
1961  *      Create an IPv6 server for the given port and address.
1962  *      Returns <0 on error and one of the %NET_XMIT_* values on success.
1963  */
1964 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
1965                          const struct in6_addr *sip, __be16 sport,
1966                          unsigned int queue)
1967 {
1968         unsigned int chan;
1969         struct sk_buff *skb;
1970         struct adapter *adap;
1971         struct cpl_pass_open_req6 *req;
1972         int ret;
1973
1974         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
1975         if (!skb)
1976                 return -ENOMEM;
1977
1978         adap = netdev2adap(dev);
1979         req = __skb_put(skb, sizeof(*req));
1980         INIT_TP_WR(req, 0);
1981         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
1982         req->local_port = sport;
1983         req->peer_port = htons(0);
1984         req->local_ip_hi = *(__be64 *)(sip->s6_addr);
1985         req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
1986         req->peer_ip_hi = cpu_to_be64(0);
1987         req->peer_ip_lo = cpu_to_be64(0);
1988         chan = rxq_to_chan(&adap->sge, queue);
1989         req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
1990         req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
1991                                 SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
1992         ret = t4_mgmt_tx(adap, skb);
1993         return net_xmit_eval(ret);
1994 }
1995 EXPORT_SYMBOL(cxgb4_create_server6);
1996
1997 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
1998                         unsigned int queue, bool ipv6)
1999 {
2000         struct sk_buff *skb;
2001         struct adapter *adap;
2002         struct cpl_close_listsvr_req *req;
2003         int ret;
2004
2005         adap = netdev2adap(dev);
2006
2007         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2008         if (!skb)
2009                 return -ENOMEM;
2010
2011         req = __skb_put(skb, sizeof(*req));
2012         INIT_TP_WR(req, 0);
2013         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
2014         req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
2015                                 LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
2016         ret = t4_mgmt_tx(adap, skb);
2017         return net_xmit_eval(ret);
2018 }
2019 EXPORT_SYMBOL(cxgb4_remove_server);
2020
2021 /**
2022  *      cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2023  *      @mtus: the HW MTU table
2024  *      @mtu: the target MTU
2025  *      @idx: index of selected entry in the MTU table
2026  *
2027  *      Returns the index and the value in the HW MTU table that is closest to
 2028  *      but does not exceed @mtu, unless @mtu is smaller than every value in the
 2029  *      table, in which case the smallest available value is selected.
2030  */
2031 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2032                             unsigned int *idx)
2033 {
2034         unsigned int i = 0;
2035
2036         while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2037                 ++i;
2038         if (idx)
2039                 *idx = i;
2040         return mtus[i];
2041 }
2042 EXPORT_SYMBOL(cxgb4_best_mtu);
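
/* For example, against a hypothetical 16-entry table, a target MTU of
 * 1400 selects 1280 (the largest entry that does not exceed it), and a
 * target of 64, smaller than every entry, selects the first entry, 88.
 * demo_best_mtu and the table values are illustrative only:
 */
#if 0	/* illustrative sketch only */
static void demo_best_mtu(void)
{
	static const unsigned short mtus[NMTUS] = {
		88, 256, 512, 576, 808, 1280, 1488, 1500,
		2002, 2048, 4096, 4352, 8192, 9000, 9600, 9900,
	};
	unsigned int idx;
	unsigned int mtu = cxgb4_best_mtu(mtus, 1400, &idx);

	pr_debug("best mtu %u at index %u\n", mtu, idx);	/* 1280 at 5 */
}
#endif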
2043
2044 /**
2045  *     cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
2046  *     @mtus: the HW MTU table
2047  *     @header_size: Header Size
2048  *     @data_size_max: maximum Data Segment Size
2049  *     @data_size_align: desired Data Segment Size Alignment (2^N)
2050  *     @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
2051  *
2052  *     Similar to cxgb4_best_mtu() but instead of searching the Hardware
2053  *     MTU Table based solely on a Maximum MTU parameter, we break that
2054  *     parameter up into a Header Size and Maximum Data Segment Size, and
2055  *     provide a desired Data Segment Size Alignment.  If we find an MTU in
2056  *     the Hardware MTU Table which will result in a Data Segment Size with
2057  *     the requested alignment _and_ that MTU isn't "too far" from the
2058  *     closest MTU, then we'll return that rather than the closest MTU.
2059  */
2060 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
2061                                     unsigned short header_size,
2062                                     unsigned short data_size_max,
2063                                     unsigned short data_size_align,
2064                                     unsigned int *mtu_idxp)
2065 {
2066         unsigned short max_mtu = header_size + data_size_max;
2067         unsigned short data_size_align_mask = data_size_align - 1;
2068         int mtu_idx, aligned_mtu_idx;
2069
2070         /* Scan the MTU Table till we find an MTU which is larger than our
2071          * Maximum MTU or we reach the end of the table.  Along the way,
2072          * record the last MTU found, if any, which will result in a Data
2073          * Segment Length matching the requested alignment.
2074          */
2075         for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
2076                 unsigned short data_size = mtus[mtu_idx] - header_size;
2077
2078                 /* If this MTU minus the Header Size would result in a
2079                  * Data Segment Size of the desired alignment, remember it.
2080                  */
2081                 if ((data_size & data_size_align_mask) == 0)
2082                         aligned_mtu_idx = mtu_idx;
2083
2084                 /* If we're not at the end of the Hardware MTU Table and the
2085                  * next element is larger than our Maximum MTU, drop out of
2086                  * the loop.
2087                  */
 2088                 if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
2089                         break;
2090         }
2091
2092         /* If we fell out of the loop because we ran to the end of the table,
2093          * then we just have to use the last [largest] entry.
2094          */
2095         if (mtu_idx == NMTUS)
2096                 mtu_idx--;
2097
2098         /* If we found an MTU which resulted in the requested Data Segment
2099          * Length alignment and that's "not far" from the largest MTU which is
2100          * less than or equal to the maximum MTU, then use that.
2101          */
2102         if (aligned_mtu_idx >= 0 &&
2103             mtu_idx - aligned_mtu_idx <= 1)
2104                 mtu_idx = aligned_mtu_idx;
2105
2106         /* If the caller has passed in an MTU Index pointer, pass the
2107          * MTU Index back.  Return the MTU value.
2108          */
2109         if (mtu_idxp)
2110                 *mtu_idxp = mtu_idx;
2111         return mtus[mtu_idx];
2112 }
2113 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
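
/* Concretely, with the hypothetical table from the previous sketch, a
 * 64-byte header, a 760-byte maximum data segment (so max_mtu = 824) and
 * 512-byte alignment: the plain closest match would be 808 (index 4), but
 * 576 (index 3) gives an exactly 512-byte data segment and is only one
 * index away, so 576 wins.  demo_best_aligned_mtu is hypothetical:
 */
#if 0	/* illustrative sketch only */
static void demo_best_aligned_mtu(const unsigned short *mtus)
{
	unsigned int idx;
	unsigned int mtu = cxgb4_best_aligned_mtu(mtus, 64, 760, 512, &idx);

	pr_debug("aligned mtu %u at index %u\n", mtu, idx);	/* 576 at 3 */
}
#endif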
2114
2115 /**
2116  *      cxgb4_port_chan - get the HW channel of a port
2117  *      @dev: the net device for the port
2118  *
2119  *      Return the HW Tx channel of the given port.
2120  */
2121 unsigned int cxgb4_port_chan(const struct net_device *dev)
2122 {
2123         return netdev2pinfo(dev)->tx_chan;
2124 }
2125 EXPORT_SYMBOL(cxgb4_port_chan);
2126
2127 /**
2128  *      cxgb4_port_e2cchan - get the HW c-channel of a port
2129  *      @dev: the net device for the port
2130  *
2131  *      Return the HW RX c-channel of the given port.
2132  */
2133 unsigned int cxgb4_port_e2cchan(const struct net_device *dev)
2134 {
2135         return netdev2pinfo(dev)->rx_cchan;
2136 }
2137 EXPORT_SYMBOL(cxgb4_port_e2cchan);
2138
2139 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
2140 {
2141         struct adapter *adap = netdev2adap(dev);
2142         u32 v1, v2, lp_count, hp_count;
2143
2144         v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2145         v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
2146         if (is_t4(adap->params.chip)) {
2147                 lp_count = LP_COUNT_G(v1);
2148                 hp_count = HP_COUNT_G(v1);
2149         } else {
2150                 lp_count = LP_COUNT_T5_G(v1);
2151                 hp_count = HP_COUNT_T5_G(v2);
2152         }
2153         return lpfifo ? lp_count : hp_count;
2154 }
2155 EXPORT_SYMBOL(cxgb4_dbfifo_count);
2156
2157 /**
2158  *      cxgb4_port_viid - get the VI id of a port
2159  *      @dev: the net device for the port
2160  *
2161  *      Return the VI id of the given port.
2162  */
2163 unsigned int cxgb4_port_viid(const struct net_device *dev)
2164 {
2165         return netdev2pinfo(dev)->viid;
2166 }
2167 EXPORT_SYMBOL(cxgb4_port_viid);
2168
2169 /**
2170  *      cxgb4_port_idx - get the index of a port
2171  *      @dev: the net device for the port
2172  *
2173  *      Return the index of the given port.
2174  */
2175 unsigned int cxgb4_port_idx(const struct net_device *dev)
2176 {
2177         return netdev2pinfo(dev)->port_id;
2178 }
2179 EXPORT_SYMBOL(cxgb4_port_idx);
2180
2181 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2182                          struct tp_tcp_stats *v6)
2183 {
2184         struct adapter *adap = pci_get_drvdata(pdev);
2185
2186         spin_lock(&adap->stats_lock);
2187         t4_tp_get_tcp_stats(adap, v4, v6, false);
2188         spin_unlock(&adap->stats_lock);
2189 }
2190 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2191
2192 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2193                       const unsigned int *pgsz_order)
2194 {
2195         struct adapter *adap = netdev2adap(dev);
2196
2197         t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
2198         t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
2199                      HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
2200                      HPZ3_V(pgsz_order[3]));
2201 }
2202 EXPORT_SYMBOL(cxgb4_iscsi_init);
2203
2204 int cxgb4_flush_eq_cache(struct net_device *dev)
2205 {
2206         struct adapter *adap = netdev2adap(dev);
2207
2208         return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
2209 }
2210 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
2211
2212 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
2213 {
2214         u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
2215         __be64 indices;
2216         int ret;
2217
2218         spin_lock(&adap->win0_lock);
2219         ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
2220                            sizeof(indices), (__be32 *)&indices,
2221                            T4_MEMORY_READ);
2222         spin_unlock(&adap->win0_lock);
2223         if (!ret) {
2224                 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
2225                 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
2226         }
2227         return ret;
2228 }
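
/* read_eq_indices() fetches a doubleword of egress-queue context and then
 * slices the producer and consumer indices out of the big-endian value.
 * The unpacking step on its own (unpack_eq_indices is hypothetical; the
 * shifts and masks mirror the code above):
 */
#if 0	/* illustrative sketch only */
static void unpack_eq_indices(__be64 raw, u16 *pidx, u16 *cidx)
{
	u64 v = be64_to_cpu(raw);	/* convert once, then slice */

	*cidx = (v >> 25) & 0xffff;	/* 16-bit consumer index at bit 25 */
	*pidx = (v >> 9) & 0xffff;	/* 16-bit producer index at bit 9 */
}
#endif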
2229
2230 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
2231                         u16 size)
2232 {
2233         struct adapter *adap = netdev2adap(dev);
2234         u16 hw_pidx, hw_cidx;
2235         int ret;
2236
2237         ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
2238         if (ret)
2239                 goto out;
2240
2241         if (pidx != hw_pidx) {
2242                 u16 delta;
2243                 u32 val;
2244
2245                 if (pidx >= hw_pidx)
2246                         delta = pidx - hw_pidx;
2247                 else
2248                         delta = size - hw_pidx + pidx;
2249
2250                 if (is_t4(adap->params.chip))
2251                         val = PIDX_V(delta);
2252                 else
2253                         val = PIDX_T5_V(delta);
2254                 wmb();
2255                 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2256                              QID_V(qid) | val);
2257         }
2258 out:
2259         return ret;
2260 }
2261 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
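
/* The pidx resync above must allow for the producer index having wrapped:
 * when the software pidx is numerically behind the hardware one, the true
 * distance is the remainder of a full trip around the ring.  The wrap
 * rule in isolation (ring_delta is hypothetical); e.g. with size 1024,
 * hw_pidx 1020 and pidx 4, the delta is 1024 - 1020 + 4 = 8:
 */
#if 0	/* illustrative sketch only */
static u16 ring_delta(u16 pidx, u16 hw_pidx, u16 size)
{
	return pidx >= hw_pidx ? pidx - hw_pidx
			       : size - hw_pidx + pidx;
}
#endif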
2262
2263 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
2264 {
2265         u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
2266         u32 edc0_end, edc1_end, mc0_end, mc1_end;
2267         u32 offset, memtype, memaddr;
2268         struct adapter *adap;
2269         u32 hma_size = 0;
2270         int ret;
2271
2272         adap = netdev2adap(dev);
2273
2274         offset = ((stag >> 8) * 32) + adap->vres.stag.start;
2275
2276         /* Figure out where the offset lands in the Memory Type/Address scheme.
2277          * This code assumes that the memory is laid out starting at offset 0
2278          * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
2279          * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
2280          * MC0, and some have both MC0 and MC1.
2281          */
2282         size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
2283         edc0_size = EDRAM0_SIZE_G(size) << 20;
2284         size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
2285         edc1_size = EDRAM1_SIZE_G(size) << 20;
2286         size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
2287         mc0_size = EXT_MEM0_SIZE_G(size) << 20;
2288
2289         if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
2290                 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2291                 hma_size = EXT_MEM1_SIZE_G(size) << 20;
2292         }
2293         edc0_end = edc0_size;
2294         edc1_end = edc0_end + edc1_size;
2295         mc0_end = edc1_end + mc0_size;
2296
2297         if (offset < edc0_end) {
2298                 memtype = MEM_EDC0;
2299                 memaddr = offset;
2300         } else if (offset < edc1_end) {
2301                 memtype = MEM_EDC1;
2302                 memaddr = offset - edc0_end;
2303         } else {
2304                 if (hma_size && (offset < (edc1_end + hma_size))) {
2305                         memtype = MEM_HMA;
2306                         memaddr = offset - edc1_end;
2307                 } else if (offset < mc0_end) {
2308                         memtype = MEM_MC0;
2309                         memaddr = offset - edc1_end;
2310                 } else if (is_t5(adap->params.chip)) {
2311                         size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
2312                         mc1_size = EXT_MEM1_SIZE_G(size) << 20;
2313                         mc1_end = mc0_end + mc1_size;
2314                         if (offset < mc1_end) {
2315                                 memtype = MEM_MC1;
2316                                 memaddr = offset - mc0_end;
2317                         } else {
2318                                 /* offset beyond the end of any memory */
2319                                 goto err;
2320                         }
2321                 } else {
 2322                         /* T4/T6 only have a single memory channel */
2323                         goto err;
2324                 }
2325         }
2326
2327         spin_lock(&adap->win0_lock);
2328         ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
2329         spin_unlock(&adap->win0_lock);
2330         return ret;
2331
2332 err:
2333         dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
2334                 stag, offset);
2335         return -EINVAL;
2336 }
2337 EXPORT_SYMBOL(cxgb4_read_tpte);
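
/* cxgb4_read_tpte() resolves a flat adapter-memory offset by walking the
 * cumulative region ends in layout order (EDC0, EDC1, then HMA or MC0,
 * then MC1 on T5) and subtracting the start of the region that contains
 * it.  The same resolution, table-driven (struct mem_region and
 * resolve_offset are hypothetical):
 */
#if 0	/* illustrative sketch only */
struct mem_region {
	int type;		/* MEM_EDC0, MEM_EDC1, MEM_MC0, ... */
	u32 size;		/* region size in bytes */
};

static int resolve_offset(const struct mem_region *r, int nregions,
			  u32 offset, int *memtype, u32 *memaddr)
{
	u32 start = 0;
	int i;

	for (i = 0; i < nregions; i++) {
		if (offset < start + r[i].size) {
			*memtype = r[i].type;
			*memaddr = offset - start;	/* region relative */
			return 0;
		}
		start += r[i].size;
	}
	return -EINVAL;		/* beyond the end of every region */
}
#endif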
2338
2339 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
2340 {
2341         u32 hi, lo;
2342         struct adapter *adap;
2343
2344         adap = netdev2adap(dev);
2345         lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
2346         hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
2347
2348         return ((u64)hi << 32) | (u64)lo;
2349 }
2350 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
2351
2352 int cxgb4_bar2_sge_qregs(struct net_device *dev,
2353                          unsigned int qid,
2354                          enum cxgb4_bar2_qtype qtype,
2355                          int user,
2356                          u64 *pbar2_qoffset,
2357                          unsigned int *pbar2_qid)
2358 {
2359         return t4_bar2_sge_qregs(netdev2adap(dev),
2360                                  qid,
2361                                  (qtype == CXGB4_BAR2_QTYPE_EGRESS
2362                                   ? T4_BAR2_QTYPE_EGRESS
2363                                   : T4_BAR2_QTYPE_INGRESS),
2364                                  user,
2365                                  pbar2_qoffset,
2366                                  pbar2_qid);
2367 }
2368 EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
2369
2370 static struct pci_driver cxgb4_driver;
2371
2372 static void check_neigh_update(struct neighbour *neigh)
2373 {
2374         const struct device *parent;
2375         const struct net_device *netdev = neigh->dev;
2376
2377         if (is_vlan_dev(netdev))
2378                 netdev = vlan_dev_real_dev(netdev);
2379         parent = netdev->dev.parent;
2380         if (parent && parent->driver == &cxgb4_driver.driver)
2381                 t4_l2t_update(dev_get_drvdata(parent), neigh);
2382 }
2383
2384 static int netevent_cb(struct notifier_block *nb, unsigned long event,
2385                        void *data)
2386 {
2387         switch (event) {
2388         case NETEVENT_NEIGH_UPDATE:
2389                 check_neigh_update(data);
2390                 break;
2391         case NETEVENT_REDIRECT:
2392         default:
2393                 break;
2394         }
2395         return 0;
2396 }
2397
2398 static bool netevent_registered;
2399 static struct notifier_block cxgb4_netevent_nb = {
2400         .notifier_call = netevent_cb
2401 };
2402
2403 static void drain_db_fifo(struct adapter *adap, int usecs)
2404 {
2405         u32 v1, v2, lp_count, hp_count;
2406
2407         do {
2408                 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
2409                 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
2410                 if (is_t4(adap->params.chip)) {
2411                         lp_count = LP_COUNT_G(v1);
2412                         hp_count = HP_COUNT_G(v1);
2413                 } else {
2414                         lp_count = LP_COUNT_T5_G(v1);
2415                         hp_count = HP_COUNT_T5_G(v2);
2416                 }
2417
2418                 if (lp_count == 0 && hp_count == 0)
2419                         break;
2420                 set_current_state(TASK_UNINTERRUPTIBLE);
2421                 schedule_timeout(usecs_to_jiffies(usecs));
2422         } while (1);
2423 }
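
/* drain_db_fifo() is a classic sleepy poll: check the condition and, while
 * it does not hold, mark the task uninterruptible and yield for a short,
 * jiffies-converted delay before re-checking.  The same shape reduced to
 * its skeleton (poll_until and its cond callback are hypothetical):
 */
#if 0	/* illustrative sketch only */
static void poll_until(bool (*cond)(void *), void *arg, int usecs)
{
	while (!cond(arg)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	}
}
#endif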
2424
2425 static void disable_txq_db(struct sge_txq *q)
2426 {
2427         unsigned long flags;
2428
2429         spin_lock_irqsave(&q->db_lock, flags);
2430         q->db_disabled = 1;
2431         spin_unlock_irqrestore(&q->db_lock, flags);
2432 }
2433
2434 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
2435 {
2436         spin_lock_irq(&q->db_lock);
2437         if (q->db_pidx_inc) {
2438                 /* Make sure that all writes to the TX descriptors
2439                  * are committed before we tell HW about them.
2440                  */
2441                 wmb();
2442                 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2443                              QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
2444                 q->db_pidx_inc = 0;
2445         }
2446         q->db_disabled = 0;
2447         spin_unlock_irq(&q->db_lock);
2448 }
2449
2450 static void disable_dbs(struct adapter *adap)
2451 {
2452         int i;
2453
2454         for_each_ethrxq(&adap->sge, i)
2455                 disable_txq_db(&adap->sge.ethtxq[i].q);
2456         if (is_offload(adap)) {
2457                 struct sge_uld_txq_info *txq_info =
2458                         adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2459
2460                 if (txq_info) {
2461                         for_each_ofldtxq(&adap->sge, i) {
2462                                 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2463
2464                                 disable_txq_db(&txq->q);
2465                         }
2466                 }
2467         }
2468         for_each_port(adap, i)
2469                 disable_txq_db(&adap->sge.ctrlq[i].q);
2470 }
2471
2472 static void enable_dbs(struct adapter *adap)
2473 {
2474         int i;
2475
2476         for_each_ethrxq(&adap->sge, i)
2477                 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
2478         if (is_offload(adap)) {
2479                 struct sge_uld_txq_info *txq_info =
2480                         adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2481
2482                 if (txq_info) {
2483                         for_each_ofldtxq(&adap->sge, i) {
2484                                 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2485
2486                                 enable_txq_db(adap, &txq->q);
2487                         }
2488                 }
2489         }
2490         for_each_port(adap, i)
2491                 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
2492 }
2493
2494 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
2495 {
2496         enum cxgb4_uld type = CXGB4_ULD_RDMA;
2497
2498         if (adap->uld && adap->uld[type].handle)
2499                 adap->uld[type].control(adap->uld[type].handle, cmd);
2500 }
2501
2502 static void process_db_full(struct work_struct *work)
2503 {
2504         struct adapter *adap;
2505
2506         adap = container_of(work, struct adapter, db_full_task);
2507
2508         drain_db_fifo(adap, dbfifo_drain_delay);
2509         enable_dbs(adap);
2510         notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2511         if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2512                 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2513                                  DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
2514                                  DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
2515         else
2516                 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2517                                  DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
2518 }
2519
2520 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
2521 {
2522         u16 hw_pidx, hw_cidx;
2523         int ret;
2524
2525         spin_lock_irq(&q->db_lock);
2526         ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
2527         if (ret)
2528                 goto out;
2529         if (q->db_pidx != hw_pidx) {
2530                 u16 delta;
2531                 u32 val;
2532
2533                 if (q->db_pidx >= hw_pidx)
2534                         delta = q->db_pidx - hw_pidx;
2535                 else
2536                         delta = q->size - hw_pidx + q->db_pidx;
2537
2538                 if (is_t4(adap->params.chip))
2539                         val = PIDX_V(delta);
2540                 else
2541                         val = PIDX_T5_V(delta);
2542                 wmb();
2543                 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
2544                              QID_V(q->cntxt_id) | val);
2545         }
2546 out:
2547         q->db_disabled = 0;
2548         q->db_pidx_inc = 0;
2549         spin_unlock_irq(&q->db_lock);
2550         if (ret)
2551                 CH_WARN(adap, "DB drop recovery failed.\n");
2552 }
2553
2554 static void recover_all_queues(struct adapter *adap)
2555 {
2556         int i;
2557
2558         for_each_ethrxq(&adap->sge, i)
2559                 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2560         if (is_offload(adap)) {
2561                 struct sge_uld_txq_info *txq_info =
2562                         adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2563                 if (txq_info) {
2564                         for_each_ofldtxq(&adap->sge, i) {
2565                                 struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2566
2567                                 sync_txq_pidx(adap, &txq->q);
2568                         }
2569                 }
2570         }
2571         for_each_port(adap, i)
2572                 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
2573 }
2574
2575 static void process_db_drop(struct work_struct *work)
2576 {
2577         struct adapter *adap;
2578
2579         adap = container_of(work, struct adapter, db_drop_task);
2580
2581         if (is_t4(adap->params.chip)) {
2582                 drain_db_fifo(adap, dbfifo_drain_delay);
2583                 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
2584                 drain_db_fifo(adap, dbfifo_drain_delay);
2585                 recover_all_queues(adap);
2586                 drain_db_fifo(adap, dbfifo_drain_delay);
2587                 enable_dbs(adap);
2588                 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
2589         } else if (is_t5(adap->params.chip)) {
2590                 u32 dropped_db = t4_read_reg(adap, 0x010ac);
2591                 u16 qid = (dropped_db >> 15) & 0x1ffff;
2592                 u16 pidx_inc = dropped_db & 0x1fff;
2593                 u64 bar2_qoffset;
2594                 unsigned int bar2_qid;
2595                 int ret;
2596
2597                 ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
2598                                         0, &bar2_qoffset, &bar2_qid);
2599                 if (ret)
 2600                         dev_err(adap->pdev_dev,
 2601                                 "doorbell drop recovery: qid=%d, pidx_inc=%d\n", qid, pidx_inc);
2602                 else
2603                         writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
2604                                adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
2605
2606                 /* Re-enable BAR2 WC */
 2607                 t4_set_reg_field(adap, 0x10b0, 1 << 15, 1 << 15);
2608         }
2609
2610         if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
2611                 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
2612 }
2613
2614 void t4_db_full(struct adapter *adap)
2615 {
2616         if (is_t4(adap->params.chip)) {
2617                 disable_dbs(adap);
2618                 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2619                 t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
2620                                  DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
2621                 queue_work(adap->workq, &adap->db_full_task);
2622         }
2623 }
2624
2625 void t4_db_dropped(struct adapter *adap)
2626 {
2627         if (is_t4(adap->params.chip)) {
2628                 disable_dbs(adap);
2629                 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
2630         }
2631         queue_work(adap->workq, &adap->db_drop_task);
2632 }
2633
2634 void t4_register_netevent_notifier(void)
2635 {
2636         if (!netevent_registered) {
2637                 register_netevent_notifier(&cxgb4_netevent_nb);
2638                 netevent_registered = true;
2639         }
2640 }
2641
2642 static void detach_ulds(struct adapter *adap)
2643 {
2644         unsigned int i;
2645
2646         mutex_lock(&uld_mutex);
2647         list_del(&adap->list_node);
2648
2649         for (i = 0; i < CXGB4_ULD_MAX; i++)
2650                 if (adap->uld && adap->uld[i].handle)
2651                         adap->uld[i].state_change(adap->uld[i].handle,
2652                                              CXGB4_STATE_DETACH);
2653
2654         if (netevent_registered && list_empty(&adapter_list)) {
2655                 unregister_netevent_notifier(&cxgb4_netevent_nb);
2656                 netevent_registered = false;
2657         }
2658         mutex_unlock(&uld_mutex);
2659 }
2660
2661 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2662 {
2663         unsigned int i;
2664
2665         mutex_lock(&uld_mutex);
2666         for (i = 0; i < CXGB4_ULD_MAX; i++)
2667                 if (adap->uld && adap->uld[i].handle)
2668                         adap->uld[i].state_change(adap->uld[i].handle,
2669                                                   new_state);
2670         mutex_unlock(&uld_mutex);
2671 }
2672
2673 #if IS_ENABLED(CONFIG_IPV6)
2674 static int cxgb4_inet6addr_handler(struct notifier_block *this,
2675                                    unsigned long event, void *data)
2676 {
2677         struct inet6_ifaddr *ifa = data;
2678         struct net_device *event_dev = ifa->idev->dev;
2679         const struct device *parent = NULL;
2680 #if IS_ENABLED(CONFIG_BONDING)
2681         struct adapter *adap;
2682 #endif
2683         if (is_vlan_dev(event_dev))
2684                 event_dev = vlan_dev_real_dev(event_dev);
2685 #if IS_ENABLED(CONFIG_BONDING)
2686         if (event_dev->flags & IFF_MASTER) {
2687                 list_for_each_entry(adap, &adapter_list, list_node) {
2688                         switch (event) {
2689                         case NETDEV_UP:
2690                                 cxgb4_clip_get(adap->port[0],
2691                                                (const u32 *)ifa, 1);
2692                                 break;
2693                         case NETDEV_DOWN:
2694                                 cxgb4_clip_release(adap->port[0],
2695                                                    (const u32 *)ifa, 1);
2696                                 break;
2697                         default:
2698                                 break;
2699                         }
2700                 }
2701                 return NOTIFY_OK;
2702         }
2703 #endif
2704
2705         if (event_dev)
2706                 parent = event_dev->dev.parent;
2707
2708         if (parent && parent->driver == &cxgb4_driver.driver) {
2709                 switch (event) {
2710                 case NETDEV_UP:
2711                         cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
2712                         break;
2713                 case NETDEV_DOWN:
2714                         cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
2715                         break;
2716                 default:
2717                         break;
2718                 }
2719         }
2720         return NOTIFY_OK;
2721 }
2722
2723 static bool inet6addr_registered;
2724 static struct notifier_block cxgb4_inet6addr_notifier = {
2725         .notifier_call = cxgb4_inet6addr_handler
2726 };
2727
2728 static void update_clip(const struct adapter *adap)
2729 {
2730         int i;
2731         struct net_device *dev;
2732         int ret;
2733
2734         rcu_read_lock();
2735
2736         for (i = 0; i < MAX_NPORTS; i++) {
2737                 dev = adap->port[i];
2738                 ret = 0;
2739
2740                 if (dev)
2741                         ret = cxgb4_update_root_dev_clip(dev);
2742
2743                 if (ret < 0)
2744                         break;
2745         }
2746         rcu_read_unlock();
2747 }
2748 #endif /* IS_ENABLED(CONFIG_IPV6) */
2749
2750 /**
2751  *      cxgb_up - enable the adapter
2752  *      @adap: adapter being enabled
2753  *
2754  *      Called when the first port is enabled, this function performs the
2755  *      actions necessary to make an adapter operational, such as completing
 2756  *      the initialization of HW modules and enabling interrupts.
2757  *
2758  *      Must be called with the rtnl lock held.
2759  */
2760 static int cxgb_up(struct adapter *adap)
2761 {
2762         struct sge *s = &adap->sge;
2763         int err;
2764
2765         mutex_lock(&uld_mutex);
2766         err = setup_sge_queues(adap);
2767         if (err)
2768                 goto rel_lock;
2769         err = setup_rss(adap);
2770         if (err)
2771                 goto freeq;
2772
2773         if (adap->flags & CXGB4_USING_MSIX) {
2774                 if (s->nd_msix_idx < 0) {
2775                         err = -ENOMEM;
2776                         goto irq_err;
2777                 }
2778
2779                 err = request_irq(adap->msix_info[s->nd_msix_idx].vec,
2780                                   t4_nondata_intr, 0,
2781                                   adap->msix_info[s->nd_msix_idx].desc, adap);
2782                 if (err)
2783                         goto irq_err;
2784
2785                 err = request_msix_queue_irqs(adap);
2786                 if (err)
2787                         goto irq_err_free_nd_msix;
2788         } else {
2789                 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2790                                   (adap->flags & CXGB4_USING_MSI) ? 0
2791                                                                   : IRQF_SHARED,
2792                                   adap->port[0]->name, adap);
2793                 if (err)
2794                         goto irq_err;
2795         }
2796
2797         enable_rx(adap);
2798         t4_sge_start(adap);
2799         t4_intr_enable(adap);
2800         adap->flags |= CXGB4_FULL_INIT_DONE;
2801         mutex_unlock(&uld_mutex);
2802
2803         notify_ulds(adap, CXGB4_STATE_UP);
2804 #if IS_ENABLED(CONFIG_IPV6)
2805         update_clip(adap);
2806 #endif
2807         return err;
2808
2809 irq_err_free_nd_msix:
2810         free_irq(adap->msix_info[s->nd_msix_idx].vec, adap);
2811 irq_err:
2812         dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2813 freeq:
2814         t4_free_sge_resources(adap);
2815 rel_lock:
2816         mutex_unlock(&uld_mutex);
2817         return err;
2818 }
2819
2820 static void cxgb_down(struct adapter *adapter)
2821 {
2822         cancel_work_sync(&adapter->tid_release_task);
2823         cancel_work_sync(&adapter->db_full_task);
2824         cancel_work_sync(&adapter->db_drop_task);
2825         adapter->tid_release_task_busy = false;
2826         adapter->tid_release_head = NULL;
2827
2828         t4_sge_stop(adapter);
2829         t4_free_sge_resources(adapter);
2830
2831         adapter->flags &= ~CXGB4_FULL_INIT_DONE;
2832 }
2833
2834 /*
2835  * net_device operations
2836  */
2837 int cxgb_open(struct net_device *dev)
2838 {
2839         struct port_info *pi = netdev_priv(dev);
2840         struct adapter *adapter = pi->adapter;
2841         int err;
2842
2843         netif_carrier_off(dev);
2844
2845         if (!(adapter->flags & CXGB4_FULL_INIT_DONE)) {
2846                 err = cxgb_up(adapter);
2847                 if (err < 0)
2848                         return err;
2849         }
2850
2851         /* It's possible that the basic port information could have
2852          * changed since we first read it.
2853          */
2854         err = t4_update_port_info(pi);
2855         if (err < 0)
2856                 return err;
2857
2858         err = link_start(dev);
2859         if (err)
2860                 return err;
2861
2862         if (pi->nmirrorqsets) {
2863                 mutex_lock(&pi->vi_mirror_mutex);
2864                 err = cxgb4_port_mirror_alloc_queues(dev);
2865                 if (err)
2866                         goto out_unlock;
2867
2868                 err = cxgb4_port_mirror_start(dev);
2869                 if (err)
2870                         goto out_free_queues;
2871                 mutex_unlock(&pi->vi_mirror_mutex);
2872         }
2873
2874         netif_tx_start_all_queues(dev);
2875         return 0;
2876
2877 out_free_queues:
2878         cxgb4_port_mirror_free_queues(dev);
2879
2880 out_unlock:
2881         mutex_unlock(&pi->vi_mirror_mutex);
2882         return err;
2883 }
2884
2885 int cxgb_close(struct net_device *dev)
2886 {
2887         struct port_info *pi = netdev_priv(dev);
2888         struct adapter *adapter = pi->adapter;
2889         int ret;
2890
2891         netif_tx_stop_all_queues(dev);
2892         netif_carrier_off(dev);
2893         ret = t4_enable_pi_params(adapter, adapter->pf, pi,
2894                                   false, false, false);
2895 #ifdef CONFIG_CHELSIO_T4_DCB
2896         cxgb4_dcb_reset(dev);
2897         dcb_tx_queue_prio_enable(dev, false);
2898 #endif
2899         if (ret)
2900                 return ret;
2901
2902         if (pi->nmirrorqsets) {
2903                 mutex_lock(&pi->vi_mirror_mutex);
2904                 cxgb4_port_mirror_stop(dev);
2905                 cxgb4_port_mirror_free_queues(dev);
2906                 mutex_unlock(&pi->vi_mirror_mutex);
2907         }
2908
2909         return 0;
2910 }
2911
2912 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
2913                 __be32 sip, __be16 sport, __be16 vlan,
2914                 unsigned int queue, unsigned char port, unsigned char mask)
2915 {
2916         int ret;
2917         struct filter_entry *f;
2918         struct adapter *adap;
2919         int i;
2920         u8 *val;
2921
2922         adap = netdev2adap(dev);
2923
2924         /* Adjust stid to correct filter index */
2925         stid -= adap->tids.sftid_base;
2926         stid += adap->tids.nftids;
2927
2928         /* Check to make sure the filter requested is writable ...
2929          */
2930         f = &adap->tids.ftid_tab[stid];
2931         ret = writable_filter(f);
2932         if (ret)
2933                 return ret;
2934
2935         /* Clear out any old resources being used by the filter before
2936          * we start constructing the new filter.
2937          */
2938         if (f->valid)
2939                 clear_filter(adap, f);
2940
2941         /* Clear out filter specifications */
2942         memset(&f->fs, 0, sizeof(struct ch_filter_specification));
2943         f->fs.val.lport = be16_to_cpu(sport);
2944         f->fs.mask.lport  = ~0;
2945         val = (u8 *)&sip;
2946         if ((val[0] | val[1] | val[2] | val[3]) != 0) {
2947                 for (i = 0; i < 4; i++) {
2948                         f->fs.val.lip[i] = val[i];
2949                         f->fs.mask.lip[i] = ~0;
2950                 }
2951                 if (adap->params.tp.vlan_pri_map & PORT_F) {
2952                         f->fs.val.iport = port;
2953                         f->fs.mask.iport = mask;
2954                 }
2955         }
2956
2957         if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
2958                 f->fs.val.proto = IPPROTO_TCP;
2959                 f->fs.mask.proto = ~0;
2960         }
2961
2962         f->fs.dirsteer = 1;
2963         f->fs.iq = queue;
2964         /* Mark filter as locked */
2965         f->locked = 1;
2966         f->fs.rpttid = 1;
2967
2968         /* Save the actual tid. We need this to get the corresponding
2969          * filter entry structure in filter_rpl.
2970          */
2971         f->tid = stid + adap->tids.ftid_base;
2972         ret = set_filter_wr(adap, stid);
2973         if (ret) {
2974                 clear_filter(adap, f);
2975                 return ret;
2976         }
2977
2978         return 0;
2979 }
2980 EXPORT_SYMBOL(cxgb4_create_server_filter);
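
/* Illustrative usage sketch (not part of the driver): an upper-layer module
 * that has reserved a server TID could steer incoming TCP connections on
 * port 80 arriving at ingress port 0 to RX queue 5 roughly as follows.
 * The stid, sip, queue, port and mask values here are hypothetical.
 *
 *	err = cxgb4_create_server_filter(netdev, stid, sip, htons(80),
 *					 0, 5, 0, ~0);
 *	if (err)
 *		netdev_err(netdev, "server filter failed: %d\n", err);
 *
 * The matching cxgb4_remove_server_filter(netdev, stid, 5, false) call
 * releases the entry again.
 */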
2981
2982 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
2983                 unsigned int queue, bool ipv6)
2984 {
2985         struct filter_entry *f;
2986         struct adapter *adap;
2987
2988         adap = netdev2adap(dev);
2989
2990         /* Adjust stid to correct filter index */
2991         stid -= adap->tids.sftid_base;
2992         stid += adap->tids.nftids;
2993
2994         f = &adap->tids.ftid_tab[stid];
2995         /* Unlock the filter */
2996         f->locked = 0;
2997
2998         return delete_filter(adap, stid);
2999 }
3000 EXPORT_SYMBOL(cxgb4_remove_server_filter);
3001
3002 static void cxgb_get_stats(struct net_device *dev,
3003                            struct rtnl_link_stats64 *ns)
3004 {
3005         struct port_stats stats;
3006         struct port_info *p = netdev_priv(dev);
3007         struct adapter *adapter = p->adapter;
3008
3009         /* Block retrieving statistics during EEH error
3010          * recovery. Otherwise, the recovery might fail
3011          * and the PCI device will be removed permanently
3012          * and the PCI device will be removed permanently.
3013         spin_lock(&adapter->stats_lock);
3014         if (!netif_device_present(dev)) {
3015                 spin_unlock(&adapter->stats_lock);
3016                 return;
3017         }
3018         t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
3019                                  &p->stats_base);
3020         spin_unlock(&adapter->stats_lock);
3021
3022         ns->tx_bytes   = stats.tx_octets;
3023         ns->tx_packets = stats.tx_frames;
3024         ns->rx_bytes   = stats.rx_octets;
3025         ns->rx_packets = stats.rx_frames;
3026         ns->multicast  = stats.rx_mcast_frames;
3027
3028         /* detailed rx_errors */
3029         ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
3030                                stats.rx_runt;
3031         ns->rx_over_errors   = 0;
3032         ns->rx_crc_errors    = stats.rx_fcs_err;
3033         ns->rx_frame_errors  = stats.rx_symbol_err;
3034         ns->rx_dropped       = stats.rx_ovflow0 + stats.rx_ovflow1 +
3035                                stats.rx_ovflow2 + stats.rx_ovflow3 +
3036                                stats.rx_trunc0 + stats.rx_trunc1 +
3037                                stats.rx_trunc2 + stats.rx_trunc3;
3038         ns->rx_missed_errors = 0;
3039
3040         /* detailed tx_errors */
3041         ns->tx_aborted_errors   = 0;
3042         ns->tx_carrier_errors   = 0;
3043         ns->tx_fifo_errors      = 0;
3044         ns->tx_heartbeat_errors = 0;
3045         ns->tx_window_errors    = 0;
3046
3047         ns->tx_errors = stats.tx_error_frames;
3048         ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
3049                 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
3050 }
3051
3052 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
3053 {
3054         unsigned int mbox;
3055         int ret = 0, prtad, devad;
3056         struct port_info *pi = netdev_priv(dev);
3057         struct adapter *adapter = pi->adapter;
3058         struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
3059
3060         switch (cmd) {
3061         case SIOCGMIIPHY:
3062                 if (pi->mdio_addr < 0)
3063                         return -EOPNOTSUPP;
3064                 data->phy_id = pi->mdio_addr;
3065                 break;
3066         case SIOCGMIIREG:
3067         case SIOCSMIIREG:
3068                 if (mdio_phy_id_is_c45(data->phy_id)) {
3069                         prtad = mdio_phy_id_prtad(data->phy_id);
3070                         devad = mdio_phy_id_devad(data->phy_id);
3071                 } else if (data->phy_id < 32) {
3072                         prtad = data->phy_id;
3073                         devad = 0;
3074                         data->reg_num &= 0x1f;
3075                 } else
3076                         return -EINVAL;
3077
3078                 mbox = pi->adapter->pf;
3079                 if (cmd == SIOCGMIIREG)
3080                         ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
3081                                          data->reg_num, &data->val_out);
3082                 else
3083                         ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
3084                                          data->reg_num, data->val_in);
3085                 break;
3086         case SIOCGHWTSTAMP:
3087                 return copy_to_user(req->ifr_data, &pi->tstamp_config,
3088                                     sizeof(pi->tstamp_config)) ?
3089                         -EFAULT : 0;
3090         case SIOCSHWTSTAMP:
3091                 if (copy_from_user(&pi->tstamp_config, req->ifr_data,
3092                                    sizeof(pi->tstamp_config)))
3093                         return -EFAULT;
3094
3095                 if (!is_t4(adapter->params.chip)) {
3096                         switch (pi->tstamp_config.tx_type) {
3097                         case HWTSTAMP_TX_OFF:
3098                         case HWTSTAMP_TX_ON:
3099                                 break;
3100                         default:
3101                                 return -ERANGE;
3102                         }
3103
3104                         switch (pi->tstamp_config.rx_filter) {
3105                         case HWTSTAMP_FILTER_NONE:
3106                                 pi->rxtstamp = false;
3107                                 break;
3108                         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3109                         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3110                                 cxgb4_ptprx_timestamping(pi, pi->port_id,
3111                                                          PTP_TS_L4);
3112                                 break;
3113                         case HWTSTAMP_FILTER_PTP_V2_EVENT:
3114                                 cxgb4_ptprx_timestamping(pi, pi->port_id,
3115                                                          PTP_TS_L2_L4);
3116                                 break;
3117                         case HWTSTAMP_FILTER_ALL:
3118                         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3119                         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3120                         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3121                         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3122                                 pi->rxtstamp = true;
3123                                 break;
3124                         default:
3125                                 pi->tstamp_config.rx_filter =
3126                                         HWTSTAMP_FILTER_NONE;
3127                                 return -ERANGE;
3128                         }
3129
3130                         if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
3131                             (pi->tstamp_config.rx_filter ==
3132                                 HWTSTAMP_FILTER_NONE)) {
3133                                 if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
3134                                         pi->ptp_enable = false;
3135                         }
3136
3137                         if (pi->tstamp_config.rx_filter !=
3138                                 HWTSTAMP_FILTER_NONE) {
3139                                 if (cxgb4_ptp_redirect_rx_packet(adapter,
3140                                                                  pi) >= 0)
3141                                         pi->ptp_enable = true;
3142                         }
3143                 } else {
3144                         /* For T4 Adapters */
3145                         switch (pi->tstamp_config.rx_filter) {
3146                         case HWTSTAMP_FILTER_NONE:
3147                                 pi->rxtstamp = false;
3148                                 break;
3149                         case HWTSTAMP_FILTER_ALL:
3150                                 pi->rxtstamp = true;
3151                                 break;
3152                         default:
3153                                 pi->tstamp_config.rx_filter =
3154                                         HWTSTAMP_FILTER_NONE;
3155                                 return -ERANGE;
3156                         }
3157                 }
3158                 return copy_to_user(req->ifr_data, &pi->tstamp_config,
3159                                     sizeof(pi->tstamp_config)) ?
3160                         -EFAULT : 0;
3161         default:
3162                 return -EOPNOTSUPP;
3163         }
3164         return ret;
3165 }
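
/* The SIOC[GS]HWTSTAMP paths above are normally exercised through the
 * standard timestamping ioctls, e.g. with linuxptp's hwstamp_ctl (the
 * interface name is only an example):
 *
 *	hwstamp_ctl -i eth0 -t 1 -r 1
 *
 * which requests TX timestamping on (HWTSTAMP_TX_ON) and rx_filter 1
 * (HWTSTAMP_FILTER_ALL).
 */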
3166
3167 static void cxgb_set_rxmode(struct net_device *dev)
3168 {
3169         /* unfortunately we can't return errors to the stack */
3170         set_rxmode(dev, -1, false);
3171 }
3172
3173 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
3174 {
3175         struct port_info *pi = netdev_priv(dev);
3176         int ret;
3177
3178         ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
3179                             pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
3180         if (!ret)
3181                 dev->mtu = new_mtu;
3182         return ret;
3183 }
3184
3185 #ifdef CONFIG_PCI_IOV
3186 static int cxgb4_mgmt_open(struct net_device *dev)
3187 {
3188         /* Turn carrier off since we don't have to transmit anything on this
3189          * interface.
3190          */
3191         netif_carrier_off(dev);
3192         return 0;
3193 }
3194
3195 /* Fill MAC address that will be assigned by the FW */
3196 static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
3197 {
3198         u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
3199         unsigned int i, vf, nvfs;
3200         u16 a, b;
3201         int err;
3202         u8 *na;
3203
3204         err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
3205         if (err)
3206                 return;
3207
3208         na = adap->params.vpd.na;
3209         for (i = 0; i < ETH_ALEN; i++)
3210                 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
3211                               hex2val(na[2 * i + 1]));
3212
3213         a = (hw_addr[0] << 8) | hw_addr[1];
3214         b = (hw_addr[1] << 8) | hw_addr[2];
3215         a ^= b;
3216         a |= 0x0200;    /* locally assigned Ethernet MAC address */
3217         a &= ~0x0100;   /* not a multicast Ethernet MAC address */
3218         macaddr[0] = a >> 8;
3219         macaddr[1] = a & 0xff;
3220
3221         for (i = 2; i < 5; i++)
3222                 macaddr[i] = hw_addr[i + 1];
3223
3224         for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
3225                 vf < nvfs; vf++) {
3226                 macaddr[5] = adap->pf * nvfs + vf;
3227                 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
3228         }
3229 }
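
/* Worked example, assuming hypothetical VPD contents that decode to a
 * hw_addr of 00:07:43:12:34:56: the code above computes a = 0x0007,
 * b = 0x0743, a ^= b -> 0x0744; setting the locally-assigned bit (already
 * set here) and clearing the multicast bit yields 0x0644.  Each VF then
 * gets a station address of the form 06:44:12:34:56:NN, where NN is
 * pf * totalvfs + vf.
 */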
3230
3231 static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
3232 {
3233         struct port_info *pi = netdev_priv(dev);
3234         struct adapter *adap = pi->adapter;
3235         int ret;
3236
3237         /* verify MAC addr is valid */
3238         if (!is_valid_ether_addr(mac)) {
3239                 dev_err(pi->adapter->pdev_dev,
3240                         "Invalid Ethernet address %pM for VF %d\n",
3241                         mac, vf);
3242                 return -EINVAL;
3243         }
3244
3245         dev_info(pi->adapter->pdev_dev,
3246                  "Setting MAC %pM on VF %d\n", mac, vf);
3247         ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
3248         if (!ret)
3249                 ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
3250         return ret;
3251 }
3252
3253 static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
3254                                     int vf, struct ifla_vf_info *ivi)
3255 {
3256         struct port_info *pi = netdev_priv(dev);
3257         struct adapter *adap = pi->adapter;
3258         struct vf_info *vfinfo;
3259
3260         if (vf >= adap->num_vfs)
3261                 return -EINVAL;
3262         vfinfo = &adap->vfinfo[vf];
3263
3264         ivi->vf = vf;
3265         ivi->max_tx_rate = vfinfo->tx_rate;
3266         ivi->min_tx_rate = 0;
3267         ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
3268         ivi->vlan = vfinfo->vlan;
3269         ivi->linkstate = vfinfo->link_state;
3270         return 0;
3271 }
3272
3273 static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
3274                                        struct netdev_phys_item_id *ppid)
3275 {
3276         struct port_info *pi = netdev_priv(dev);
3277         unsigned int phy_port_id;
3278
3279         phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
3280         ppid->id_len = sizeof(phy_port_id);
3281         memcpy(ppid->id, &phy_port_id, ppid->id_len);
3282         return 0;
3283 }
3284
3285 static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
3286                                   int min_tx_rate, int max_tx_rate)
3287 {
3288         struct port_info *pi = netdev_priv(dev);
3289         struct adapter *adap = pi->adapter;
3290         unsigned int link_ok, speed, mtu;
3291         u32 fw_pfvf, fw_class;
3292         int class_id = vf;
3293         int ret;
3294         u16 pktsize;
3295
3296         if (vf >= adap->num_vfs)
3297                 return -EINVAL;
3298
3299         if (min_tx_rate) {
3300                 dev_err(adap->pdev_dev,
3301                         "Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
3302                         "Min tx rate (%d) (> 0) for VF %d is invalid.\n",
3303                 return -EINVAL;
3304         }
3305
3306         if (max_tx_rate == 0) {
3307                 /* unbind VF from any Traffic Class */
3308                 fw_pfvf =
3309                     (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3310                      FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
3311                 fw_class = 0xffffffff;
3312                 ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3313                                     &fw_pfvf, &fw_class);
3314                 if (ret) {
3315                         dev_err(adap->pdev_dev,
3316                                 "Err %d in unbinding PF %d VF %d from TX Rate Limiting\n",
3317                                 ret, adap->pf, vf);
3318                         return -EINVAL;
3319                 }
3320                 dev_info(adap->pdev_dev,
3321                          "PF %d VF %d is unbound from TX Rate Limiting\n",
3322                          adap->pf, vf);
3323                 adap->vfinfo[vf].tx_rate = 0;
3324                 return 0;
3325         }
3326
3327         ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
3328         if (ret != FW_SUCCESS) {
3329                 dev_err(adap->pdev_dev,
3330                         "Failed to get link information for VF %d\n", vf);
3331                 return -EINVAL;
3332         }
3333
3334         if (!link_ok) {
3335                 dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
3336                 return -EINVAL;
3337         }
3338
3339         if (max_tx_rate > speed) {
3340                 dev_err(adap->pdev_dev,
3341                         "Max tx rate %d for VF %d can't be > link-speed %u\n",
3342                         max_tx_rate, vf, speed);
3343                 return -EINVAL;
3344         }
3345
3346         pktsize = mtu;
3347         /* subtract ethhdr size and 4 bytes of CRC, since f/w appends them */
3348         pktsize = pktsize - sizeof(struct ethhdr) - 4;
3349         /* subtract ipv4 and tcp hdr sizes to get the typical IPv4 MSS */
3350         pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
3351         /* configure Traffic Class for rate-limiting */
3352         ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
3353                               SCHED_CLASS_LEVEL_CL_RL,
3354                               SCHED_CLASS_MODE_CLASS,
3355                               SCHED_CLASS_RATEUNIT_BITS,
3356                               SCHED_CLASS_RATEMODE_ABS,
3357                               pi->tx_chan, class_id, 0,
3358                               max_tx_rate * 1000, 0, pktsize, 0);
3359         if (ret) {
3360                 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
3361                         ret);
3362                 return -EINVAL;
3363         }
3364         dev_info(adap->pdev_dev,
3365                  "Class %d with MSS %u configured with rate %u\n",
3366                  class_id, pktsize, max_tx_rate);
3367
3368         /* bind VF to configured Traffic Class */
3369         fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3370                    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
3371         fw_class = class_id;
3372         ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
3373                             &fw_class);
3374         if (ret) {
3375                 dev_err(adap->pdev_dev,
3376                         "Err %d in binding PF %d VF %d to Traffic Class %d\n",
3377                         ret, adap->pf, vf, class_id);
3378                 return -EINVAL;
3379         }
3380         dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
3381                  adap->pf, vf, class_id);
3382         adap->vfinfo[vf].tx_rate = max_tx_rate;
3383         return 0;
3384 }
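
/* Worked example for the MSS computation above, assuming a standard
 * 1500-byte MTU: 1500 - 14 (ethhdr) - 4 (CRC) - 20 (iphdr) - 20 (tcphdr)
 * leaves a pktsize of 1442 bytes for the Traffic Class.
 */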
3385
3386 static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
3387                                   u16 vlan, u8 qos, __be16 vlan_proto)
3388 {
3389         struct port_info *pi = netdev_priv(dev);
3390         struct adapter *adap = pi->adapter;
3391         int ret;
3392
3393         if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
3394                 return -EINVAL;
3395
3396         if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
3397                 return -EPROTONOSUPPORT;
3398
3399         ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
3400         if (!ret) {
3401                 adap->vfinfo[vf].vlan = vlan;
3402                 return 0;
3403         }
3404
3405         dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
3406                 ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
3407         return ret;
3408 }
3409
3410 static int cxgb4_mgmt_set_vf_link_state(struct net_device *dev, int vf,
3411                                         int link)
3412 {
3413         struct port_info *pi = netdev_priv(dev);
3414         struct adapter *adap = pi->adapter;
3415         u32 param, val;
3416         int ret = 0;
3417
3418         if (vf >= adap->num_vfs)
3419                 return -EINVAL;
3420
3421         switch (link) {
3422         case IFLA_VF_LINK_STATE_AUTO:
3423                 val = FW_VF_LINK_STATE_AUTO;
3424                 break;
3425
3426         case IFLA_VF_LINK_STATE_ENABLE:
3427                 val = FW_VF_LINK_STATE_ENABLE;
3428                 break;
3429
3430         case IFLA_VF_LINK_STATE_DISABLE:
3431                 val = FW_VF_LINK_STATE_DISABLE;
3432                 break;
3433
3434         default:
3435                 return -EINVAL;
3436         }
3437
3438         param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
3439                  FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_LINK_STATE));
3440         ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1,
3441                             &param, &val);
3442         if (ret) {
3443                 dev_err(adap->pdev_dev,
3444                         "Error %d in setting PF %d VF %d link state\n",
3445                         ret, adap->pf, vf);
3446                 return -EINVAL;
3447         }
3448
3449         adap->vfinfo[vf].link_state = link;
3450         return ret;
3451 }
3452 #endif /* CONFIG_PCI_IOV */
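
/* These VF management callbacks are normally driven from userspace with
 * iproute2 against the PF's management interface, e.g. (the device name
 * and values are examples only):
 *
 *	ip link set dev mgmtpf0 vf 0 mac 06:44:12:34:56:01
 *	ip link set dev mgmtpf0 vf 0 vlan 100
 *	ip link set dev mgmtpf0 vf 0 max_tx_rate 1000
 *	ip link set dev mgmtpf0 vf 0 state enable
 */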
3453
3454 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
3455 {
3456         int ret;
3457         struct sockaddr *addr = p;
3458         struct port_info *pi = netdev_priv(dev);
3459
3460         if (!is_valid_ether_addr(addr->sa_data))
3461                 return -EADDRNOTAVAIL;
3462
3463         ret = cxgb4_update_mac_filt(pi, pi->viid, &pi->xact_addr_filt,
3464                                     addr->sa_data, true, &pi->smt_idx);
3465         if (ret < 0)
3466                 return ret;
3467
3468         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3469         return 0;
3470 }
3471
3472 #ifdef CONFIG_NET_POLL_CONTROLLER
3473 static void cxgb_netpoll(struct net_device *dev)
3474 {
3475         struct port_info *pi = netdev_priv(dev);
3476         struct adapter *adap = pi->adapter;
3477
3478         if (adap->flags & CXGB4_USING_MSIX) {
3479                 int i;
3480                 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
3481
3482                 for (i = pi->nqsets; i; i--, rx++)
3483                         t4_sge_intr_msix(0, &rx->rspq);
3484         } else
3485                 t4_intr_handler(adap)(0, adap);
3486 }
3487 #endif
3488
3489 static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
3490 {
3491         struct port_info *pi = netdev_priv(dev);
3492         struct adapter *adap = pi->adapter;
3493         struct ch_sched_queue qe = { 0 };
3494         struct ch_sched_params p = { 0 };
3495         struct sched_class *e;
3496         u32 req_rate;
3497         int err = 0;
3498
3499         if (!can_sched(dev))
3500                 return -EOPNOTSUPP;
3501
3502         if (index < 0 || index > pi->nqsets - 1)
3503                 return -EINVAL;
3504
3505         if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3506                 dev_err(adap->pdev_dev,
3507                         "Failed to rate limit on queue %d. Link Down?\n",
3508                         index);
3509                 return -EINVAL;
3510         }
3511
3512         qe.queue = index;
3513         e = cxgb4_sched_queue_lookup(dev, &qe);
3514         if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CL_RL) {
3515                 dev_err(adap->pdev_dev,
3516                         "Queue %u already bound to class %u of type: %u\n",
3517                         index, e->idx, e->info.u.params.level);
3518                 return -EBUSY;
3519         }
3520
3521         /* Convert from Mbps to Kbps */
3522         req_rate = rate * 1000;
3523
3524         /* Max rate is 100 Gbps */
3525         if (req_rate > SCHED_MAX_RATE_KBPS) {
3526                 dev_err(adap->pdev_dev,
3527                         "Invalid rate %u Mbps, max rate is %u Mbps\n",
3528                         rate, SCHED_MAX_RATE_KBPS / 1000);
3529                 return -ERANGE;
3530         }
3531
3532         /* First unbind the queue from any existing class */
3533         memset(&qe, 0, sizeof(qe));
3534         qe.queue = index;
3535         qe.class = SCHED_CLS_NONE;
3536
3537         err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
3538         if (err) {
3539                 dev_err(adap->pdev_dev,
3540                         "Unbinding Queue %d on port %d failed. Err: %d\n",
3541                         index, pi->port_id, err);
3542                 return err;
3543         }
3544
3545         /* Queue already unbound */
3546         if (!req_rate)
3547                 return 0;
3548
3549         /* Fetch any available unused or matching scheduling class */
3550         p.type = SCHED_CLASS_TYPE_PACKET;
3551         p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
3552         p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
3553         p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
3554         p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
3555         p.u.params.channel  = pi->tx_chan;
3556         p.u.params.class    = SCHED_CLS_NONE;
3557         p.u.params.minrate  = 0;
3558         p.u.params.maxrate  = req_rate;
3559         p.u.params.weight   = 0;
3560         p.u.params.pktsize  = dev->mtu;
3561
3562         e = cxgb4_sched_class_alloc(dev, &p);
3563         if (!e)
3564                 return -ENOMEM;
3565
3566         /* Bind the queue to a scheduling class */
3567         memset(&qe, 0, sizeof(qe));
3568         qe.queue = index;
3569         qe.class = e->idx;
3570
3571         err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
3572         if (err)
3573                 dev_err(adap->pdev_dev,
3574                         "Queue rate limiting failed. Err: %d\n", err);
3575         return err;
3576 }
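
/* .ndo_set_tx_maxrate is driven from sysfs; for example, limiting TX
 * queue 0 of eth0 (an example interface name) to 1000 Mbps:
 *
 *	echo 1000 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
 *
 * Writing 0 removes the limit, which takes the unbind-only path above.
 */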
3577
3578 static int cxgb_setup_tc_flower(struct net_device *dev,
3579                                 struct flow_cls_offload *cls_flower)
3580 {
3581         switch (cls_flower->command) {
3582         case FLOW_CLS_REPLACE:
3583                 return cxgb4_tc_flower_replace(dev, cls_flower);
3584         case FLOW_CLS_DESTROY:
3585                 return cxgb4_tc_flower_destroy(dev, cls_flower);
3586         case FLOW_CLS_STATS:
3587                 return cxgb4_tc_flower_stats(dev, cls_flower);
3588         default:
3589                 return -EOPNOTSUPP;
3590         }
3591 }
3592
3593 static int cxgb_setup_tc_cls_u32(struct net_device *dev,
3594                                  struct tc_cls_u32_offload *cls_u32)
3595 {
3596         switch (cls_u32->command) {
3597         case TC_CLSU32_NEW_KNODE:
3598         case TC_CLSU32_REPLACE_KNODE:
3599                 return cxgb4_config_knode(dev, cls_u32);
3600         case TC_CLSU32_DELETE_KNODE:
3601                 return cxgb4_delete_knode(dev, cls_u32);
3602         default:
3603                 return -EOPNOTSUPP;
3604         }
3605 }
3606
3607 static int cxgb_setup_tc_matchall(struct net_device *dev,
3608                                   struct tc_cls_matchall_offload *cls_matchall,
3609                                   bool ingress)
3610 {
3611         struct adapter *adap = netdev2adap(dev);
3612
3613         if (!adap->tc_matchall)
3614                 return -ENOMEM;
3615
3616         switch (cls_matchall->command) {
3617         case TC_CLSMATCHALL_REPLACE:
3618                 return cxgb4_tc_matchall_replace(dev, cls_matchall, ingress);
3619         case TC_CLSMATCHALL_DESTROY:
3620                 return cxgb4_tc_matchall_destroy(dev, cls_matchall, ingress);
3621         case TC_CLSMATCHALL_STATS:
3622                 if (ingress)
3623                         return cxgb4_tc_matchall_stats(dev, cls_matchall);
3624                 break;
3625         default:
3626                 break;
3627         }
3628
3629         return -EOPNOTSUPP;
3630 }
3631
3632 static int cxgb_setup_tc_block_ingress_cb(enum tc_setup_type type,
3633                                           void *type_data, void *cb_priv)
3634 {
3635         struct net_device *dev = cb_priv;
3636         struct port_info *pi = netdev2pinfo(dev);
3637         struct adapter *adap = netdev2adap(dev);
3638
3639         if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3640                 dev_err(adap->pdev_dev,
3641                         "Failed to set up tc on port %d. Link Down?\n",
3642                         pi->port_id);
3643                 return -EINVAL;
3644         }
3645
3646         if (!tc_cls_can_offload_and_chain0(dev, type_data))
3647                 return -EOPNOTSUPP;
3648
3649         switch (type) {
3650         case TC_SETUP_CLSU32:
3651                 return cxgb_setup_tc_cls_u32(dev, type_data);
3652         case TC_SETUP_CLSFLOWER:
3653                 return cxgb_setup_tc_flower(dev, type_data);
3654         case TC_SETUP_CLSMATCHALL:
3655                 return cxgb_setup_tc_matchall(dev, type_data, true);
3656         default:
3657                 return -EOPNOTSUPP;
3658         }
3659 }
3660
3661 static int cxgb_setup_tc_block_egress_cb(enum tc_setup_type type,
3662                                          void *type_data, void *cb_priv)
3663 {
3664         struct net_device *dev = cb_priv;
3665         struct port_info *pi = netdev2pinfo(dev);
3666         struct adapter *adap = netdev2adap(dev);
3667
3668         if (!(adap->flags & CXGB4_FULL_INIT_DONE)) {
3669                 dev_err(adap->pdev_dev,
3670                         "Failed to set up tc on port %d. Link Down?\n",
3671                         pi->port_id);
3672                 return -EINVAL;
3673         }
3674
3675         if (!tc_cls_can_offload_and_chain0(dev, type_data))
3676                 return -EOPNOTSUPP;
3677
3678         switch (type) {
3679         case TC_SETUP_CLSMATCHALL:
3680                 return cxgb_setup_tc_matchall(dev, type_data, false);
3681         default:
3682                 break;
3683         }
3684
3685         return -EOPNOTSUPP;
3686 }
3687
3688 static int cxgb_setup_tc_mqprio(struct net_device *dev,
3689                                 struct tc_mqprio_qopt_offload *mqprio)
3690 {
3691         struct adapter *adap = netdev2adap(dev);
3692
3693         if (!is_ethofld(adap) || !adap->tc_mqprio)
3694                 return -ENOMEM;
3695
3696         return cxgb4_setup_tc_mqprio(dev, mqprio);
3697 }
3698
3699 static LIST_HEAD(cxgb_block_cb_list);
3700
3701 static int cxgb_setup_tc_block(struct net_device *dev,
3702                                struct flow_block_offload *f)
3703 {
3704         struct port_info *pi = netdev_priv(dev);
3705         flow_setup_cb_t *cb;
3706         bool ingress_only;
3707
3708         pi->tc_block_shared = f->block_shared;
3709         if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
3710                 cb = cxgb_setup_tc_block_egress_cb;
3711                 ingress_only = false;
3712         } else {
3713                 cb = cxgb_setup_tc_block_ingress_cb;
3714                 ingress_only = true;
3715         }
3716
3717         return flow_block_cb_setup_simple(f, &cxgb_block_cb_list,
3718                                           cb, pi, dev, ingress_only);
3719 }
3720
3721 static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
3722                          void *type_data)
3723 {
3724         switch (type) {
3725         case TC_SETUP_QDISC_MQPRIO:
3726                 return cxgb_setup_tc_mqprio(dev, type_data);
3727         case TC_SETUP_BLOCK:
3728                 return cxgb_setup_tc_block(dev, type_data);
3729         default:
3730                 return -EOPNOTSUPP;
3731         }
3732 }
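
/* A usage sketch: these offloads are typically reached via tc, e.g. (the
 * interface name and match are examples only):
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 ingress protocol ip flower \
 *		dst_ip 192.0.2.1 skip_sw action drop
 *
 * which lands in cxgb4_tc_flower_replace() through the ingress block
 * callback registered above.
 */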
3733
3734 static int cxgb_udp_tunnel_unset_port(struct net_device *netdev,
3735                                       unsigned int table, unsigned int entry,
3736                                       struct udp_tunnel_info *ti)
3737 {
3738         struct port_info *pi = netdev_priv(netdev);
3739         struct adapter *adapter = pi->adapter;
3740         u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3741         int ret = 0, i;
3742
3743         switch (ti->type) {
3744         case UDP_TUNNEL_TYPE_VXLAN:
3745                 adapter->vxlan_port = 0;
3746                 t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
3747                 break;
3748         case UDP_TUNNEL_TYPE_GENEVE:
3749                 adapter->geneve_port = 0;
3750                 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
3751                 break;
3752         default:
3753                 return -EINVAL;
3754         }
3755
3756         /* Matchall mac entries can be deleted only after all tunnel ports
3757          * are brought down or removed.
3758          */
3759         if (!adapter->rawf_cnt)
3760                 return 0;
3761         for_each_port(adapter, i) {
3762                 pi = adap2pinfo(adapter, i);
3763                 ret = t4_free_raw_mac_filt(adapter, pi->viid,
3764                                            match_all_mac, match_all_mac,
3765                                            adapter->rawf_start + pi->port_id,
3766                                            1, pi->port_id, false);
3767                 if (ret < 0) {
3768                         netdev_info(netdev, "Failed to free mac filter entry for port %d\n",
3769                                     i);
3770                         return ret;
3771                 }
3772         }
3773
3774         return 0;
3775 }
3776
3777 static int cxgb_udp_tunnel_set_port(struct net_device *netdev,
3778                                     unsigned int table, unsigned int entry,
3779                                     struct udp_tunnel_info *ti)
3780 {
3781         struct port_info *pi = netdev_priv(netdev);
3782         struct adapter *adapter = pi->adapter;
3783         u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
3784         int i, ret;
3785
3786         switch (ti->type) {
3787         case UDP_TUNNEL_TYPE_VXLAN:
3788                 adapter->vxlan_port = ti->port;
3789                 t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
3790                              VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
3791                 break;
3792         case UDP_TUNNEL_TYPE_GENEVE:
3793                 adapter->geneve_port = ti->port;
3794                 t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
3795                              GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
3796                 break;
3797         default:
3798                 return -EINVAL;
3799         }
3800
3801         /* Create a 'match all' mac filter entry for inner mac,
3802          * if raw mac interface is supported. Once the linux kernel provides
3803          * driver entry points for adding/deleting the inner mac addresses,
3804          * we will remove this 'match all' entry and fall back to adding
3805          * exact match filters.
3806          */
3807         for_each_port(adapter, i) {
3808                 pi = adap2pinfo(adapter, i);
3809
3810                 ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
3811                                             match_all_mac,
3812                                             match_all_mac,
3813                                             adapter->rawf_start + pi->port_id,
3814                                             1, pi->port_id, false);
3815                 if (ret < 0) {
3816                         netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
3817                                     be16_to_cpu(ti->port));
3818                         return ret;
3819                 }
3820         }
3821
3822         return 0;
3823 }
3824
3825 static const struct udp_tunnel_nic_info cxgb_udp_tunnels = {
3826         .set_port       = cxgb_udp_tunnel_set_port,
3827         .unset_port     = cxgb_udp_tunnel_unset_port,
3828         .tables         = {
3829                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
3830                 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
3831         },
3832 };
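
/* The udp_tunnel_nic core calls .set_port/.unset_port above as tunnel
 * devices come and go, e.g. after something like (values are examples):
 *
 *	ip link add vxlan0 type vxlan id 42 dstport 4789 dev eth0
 *
 * With one entry per table, only a single VXLAN port and a single GENEVE
 * port can be offloaded at a time.
 */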
3833
3834 static netdev_features_t cxgb_features_check(struct sk_buff *skb,
3835                                              struct net_device *dev,
3836                                              netdev_features_t features)
3837 {
3838         struct port_info *pi = netdev_priv(dev);
3839         struct adapter *adapter = pi->adapter;
3840
3841         if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
3842                 return features;
3843
3844         /* Check if hw supports offload for this packet */
3845         if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
3846                 return features;
3847
3848         /* Offload is not supported for this encapsulated packet */
3849         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3850 }
3851
3852 static netdev_features_t cxgb_fix_features(struct net_device *dev,
3853                                            netdev_features_t features)
3854 {
3855         /* Disable GRO if RX_CSUM is disabled */
3856         if (!(features & NETIF_F_RXCSUM))
3857                 features &= ~NETIF_F_GRO;
3858
3859         return features;
3860 }
3861
3862 static const struct net_device_ops cxgb4_netdev_ops = {
3863         .ndo_open             = cxgb_open,
3864         .ndo_stop             = cxgb_close,
3865         .ndo_start_xmit       = t4_start_xmit,
3866         .ndo_select_queue     = cxgb_select_queue,
3867         .ndo_get_stats64      = cxgb_get_stats,
3868         .ndo_set_rx_mode      = cxgb_set_rxmode,
3869         .ndo_set_mac_address  = cxgb_set_mac_addr,
3870         .ndo_set_features     = cxgb_set_features,
3871         .ndo_validate_addr    = eth_validate_addr,
3872         .ndo_do_ioctl         = cxgb_ioctl,
3873         .ndo_change_mtu       = cxgb_change_mtu,
3874 #ifdef CONFIG_NET_POLL_CONTROLLER
3875         .ndo_poll_controller  = cxgb_netpoll,
3876 #endif
3877 #ifdef CONFIG_CHELSIO_T4_FCOE
3878         .ndo_fcoe_enable      = cxgb_fcoe_enable,
3879         .ndo_fcoe_disable     = cxgb_fcoe_disable,
3880 #endif /* CONFIG_CHELSIO_T4_FCOE */
3881         .ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
3882         .ndo_setup_tc         = cxgb_setup_tc,
3883         .ndo_features_check   = cxgb_features_check,
3884         .ndo_fix_features     = cxgb_fix_features,
3885 };
3886
3887 #ifdef CONFIG_PCI_IOV
3888 static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
3889         .ndo_open               = cxgb4_mgmt_open,
3890         .ndo_set_vf_mac         = cxgb4_mgmt_set_vf_mac,
3891         .ndo_get_vf_config      = cxgb4_mgmt_get_vf_config,
3892         .ndo_set_vf_rate        = cxgb4_mgmt_set_vf_rate,
3893         .ndo_get_phys_port_id   = cxgb4_mgmt_get_phys_port_id,
3894         .ndo_set_vf_vlan        = cxgb4_mgmt_set_vf_vlan,
3895         .ndo_set_vf_link_state  = cxgb4_mgmt_set_vf_link_state,
3896 };
3897 #endif
3898
3899 static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
3900                                    struct ethtool_drvinfo *info)
3901 {
3902         struct adapter *adapter = netdev2adap(dev);
3903
3904         strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
3905         strlcpy(info->bus_info, pci_name(adapter->pdev),
3906                 sizeof(info->bus_info));
3907 }
3908
3909 static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
3910         .get_drvinfo       = cxgb4_mgmt_get_drvinfo,
3911 };
3912
3913 static void notify_fatal_err(struct work_struct *work)
3914 {
3915         struct adapter *adap;
3916
3917         adap = container_of(work, struct adapter, fatal_err_notify_task);
3918         notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
3919 }
3920
3921 void t4_fatal_err(struct adapter *adap)
3922 {
3923         int port;
3924
3925         if (pci_channel_offline(adap->pdev))
3926                 return;
3927
3928         /* Disable the SGE since ULDs are going to free resources that
3929          * could be exposed to the adapter.  RDMA MWs for example...
3930          */
3931         t4_shutdown_adapter(adap);
3932         for_each_port(adap, port) {
3933                 struct net_device *dev = adap->port[port];
3934
3935                 /* If we get here in very early initialization the network
3936                  * devices may not have been set up yet.
3937                  */
3938                 if (!dev)
3939                         continue;
3940
3941                 netif_tx_stop_all_queues(dev);
3942                 netif_carrier_off(dev);
3943         }
3944         dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3945         queue_work(adap->workq, &adap->fatal_err_notify_task);
3946 }
3947
3948 static void setup_memwin(struct adapter *adap)
3949 {
3950         u32 nic_win_base = t4_get_util_window(adap);
3951
3952         t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
3953 }
3954
3955 static void setup_memwin_rdma(struct adapter *adap)
3956 {
3957         if (adap->vres.ocq.size) {
3958                 u32 start;
3959                 unsigned int sz_kb;
3960
3961                 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
3962                 start &= PCI_BASE_ADDRESS_MEM_MASK;
3963                 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3964                 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3965                 t4_write_reg(adap,
3966                              PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
3967                              start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
3968                 t4_write_reg(adap,
3969                              PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
3970                              adap->vres.ocq.start);
3971                 t4_read_reg(adap,
3972                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
3973         }
3974 }
3975
3976 /* HMA Definitions */
3977
3978 /* The maximum number of addresses that can be sent in a single FW cmd */
3979 #define HMA_MAX_ADDR_IN_CMD     5
3980
3981 #define HMA_PAGE_SIZE           PAGE_SIZE
3982
3983 #define HMA_MAX_NO_FW_ADDRESS   (16 << 10)  /* FW supports 16K addresses */
3984
3985 #define HMA_PAGE_ORDER                                  \
3986         ((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ?      \
3987         ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)
3988
3989 /* The minimum and maximum possible HMA sizes that can be specified in the FW
3990  * configuration (in units of MB).
3991  */
3992 #define HMA_MIN_TOTAL_SIZE      1
3993 #define HMA_MAX_TOTAL_SIZE                              \
3994         (((HMA_PAGE_SIZE << HMA_PAGE_ORDER) *           \
3995           HMA_MAX_NO_FW_ADDRESS) >> 20)
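
/* Worked example, assuming a 4KB PAGE_SIZE: HMA_PAGE_ORDER is
 * ilog2(16384 / 4096) = 2, so each allocation covers 16KB, and
 * HMA_MAX_TOTAL_SIZE works out to (16KB * 16K addresses) >> 20 = 256MB.
 */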
3996
3997 static void adap_free_hma_mem(struct adapter *adapter)
3998 {
3999         struct scatterlist *iter;
4000         struct page *page;
4001         int i;
4002
4003         if (!adapter->hma.sgt)
4004                 return;
4005
4006         if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
4007                 dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
4008                              adapter->hma.sgt->nents, DMA_BIDIRECTIONAL);
4009                 adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
4010         }
4011
4012         for_each_sg(adapter->hma.sgt->sgl, iter,
4013                     adapter->hma.sgt->orig_nents, i) {
4014                 page = sg_page(iter);
4015                 if (page)
4016                         __free_pages(page, HMA_PAGE_ORDER);
4017         }
4018
4019         kfree(adapter->hma.phy_addr);
4020         sg_free_table(adapter->hma.sgt);
4021         kfree(adapter->hma.sgt);
4022         adapter->hma.sgt = NULL;
4023 }
4024
4025 static int adap_config_hma(struct adapter *adapter)
4026 {
4027         struct scatterlist *sgl, *iter;
4028         struct sg_table *sgt;
4029         struct page *newpage;
4030         unsigned int i, j, k;
4031         u32 param, hma_size;
4032         unsigned int ncmds;
4033         size_t page_size;
4034         u32 page_order;
4035         int node, ret;
4036
4037         /* HMA is supported only for T6+ cards.
4038          * Avoid initializing HMA in kdump kernels.
4039          */
4040         if (is_kdump_kernel() ||
4041             CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
4042                 return 0;
4043
4044         /* Get the HMA region size required by fw */
4045         param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4046                  FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
4047         ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
4048                               1, &param, &hma_size);
4049         /* An error means card has its own memory or HMA is not supported by
4050          * the firmware. Return without any errors.
4051          */
4052         if (ret || !hma_size)
4053                 return 0;
4054
4055         if (hma_size < HMA_MIN_TOTAL_SIZE ||
4056             hma_size > HMA_MAX_TOTAL_SIZE) {
4057                 dev_err(adapter->pdev_dev,
4058                         "HMA size %uMB beyond bounds (%u-%lu)MB\n",
4059                         hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
4060                 return -EINVAL;
4061         }
4062
4063         page_size = HMA_PAGE_SIZE;
4064         page_order = HMA_PAGE_ORDER;
4065         adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
4066         if (unlikely(!adapter->hma.sgt)) {
4067                 dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
4068                 return -ENOMEM;
4069         }
4070         sgt = adapter->hma.sgt;
4071         /* FW returned value will be in MB */
4073         sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
4074         if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
4075                 dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
4076                 kfree(adapter->hma.sgt);
4077                 adapter->hma.sgt = NULL;
4078                 return -ENOMEM;
4079         }
4080
4081         sgl = adapter->hma.sgt->sgl;
4082         node = dev_to_node(adapter->pdev_dev);
4083         for_each_sg(sgl, iter, sgt->orig_nents, i) {
4084                 newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
4085                                            __GFP_ZERO, page_order);
4086                 if (!newpage) {
4087                         dev_err(adapter->pdev_dev,
4088                                 "Not enough memory for HMA page allocation\n");
4089                         ret = -ENOMEM;
4090                         goto free_hma;
4091                 }
4092                 sg_set_page(iter, newpage, page_size << page_order, 0);
4093         }
4094
4095         sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
4096                                 DMA_BIDIRECTIONAL);
4097         if (!sgt->nents) {
4098                 dev_err(adapter->pdev_dev,
4099                         "Not enough memory for HMA DMA mapping\n");
4100                 ret = -ENOMEM;
4101                 goto free_hma;
4102         }
4103         adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;
4104
4105         adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
4106                                         GFP_KERNEL);
4107         if (unlikely(!adapter->hma.phy_addr)) {
                     ret = -ENOMEM;
4108                 goto free_hma;
             }
4109
4110         for_each_sg(sgl, iter, sgt->nents, i) {
4111                 newpage = sg_page(iter);
4112                 adapter->hma.phy_addr[i] = sg_dma_address(iter);
4113         }
4114
4115         ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
4116         /* Pass on the addresses to firmware */
4117         for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
4118                 struct fw_hma_cmd hma_cmd;
4119                 u8 naddr = HMA_MAX_ADDR_IN_CMD;
4120                 u8 soc = 0, eoc = 0;
4121                 u8 hma_mode = 1; /* Presently we support only Page table mode */
4122
4123                 soc = (i == 0) ? 1 : 0;
4124                 eoc = (i == ncmds - 1) ? 1 : 0;
4125
4126                 /* For last cmd, set naddr corresponding to remaining
4127                  * addresses
4128                  */
4129                 if (i == ncmds - 1) {
4130                         naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
4131                         naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
4132                 }
4133                 memset(&hma_cmd, 0, sizeof(hma_cmd));
4134                 hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
4135                                        FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
4136                 hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));
4137
4138                 hma_cmd.mode_to_pcie_params =
4139                         htonl(FW_HMA_CMD_MODE_V(hma_mode) |
4140                               FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));
4141
4142                 /* HMA cmd size specified in MB */
4143                 hma_cmd.naddr_size =
4144                         htonl(FW_HMA_CMD_SIZE_V(hma_size) |
4145                               FW_HMA_CMD_NADDR_V(naddr));
4146
4147                 /* Total Page size specified in units of 4K */
4148                 hma_cmd.addr_size_pkd =
4149                         htonl(FW_HMA_CMD_ADDR_SIZE_V
4150                                 ((page_size << page_order) >> 12));
4151
4152                 /* Fill up to HMA_MAX_ADDR_IN_CMD addresses */
4153                 for (j = 0; j < naddr; j++) {
4154                         hma_cmd.phy_address[j] =
4155                                 cpu_to_be64(adapter->hma.phy_addr[j + k]);
4156                 }
4157                 ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
4158                                  sizeof(hma_cmd), &hma_cmd);
4159                 if (ret) {
4160                         dev_err(adapter->pdev_dev,
4161                                 "HMA FW command failed with err %d\n", ret);
4162                         goto free_hma;
4163                 }
4164         }
4165
4166         if (!ret)
4167                 dev_info(adapter->pdev_dev,
4168                          "Reserved %uMB host memory for HMA\n", hma_size);
4169         return ret;
4170
4171 free_hma:
4172         adap_free_hma_mem(adapter);
4173         return ret;
4174 }
4175
4176 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4177 {
4178         u32 v;
4179         int ret;
4180
4181         /* Now that we've successfully configured and initialized the adapter,
4182          * we can ask the Firmware what resources it has provisioned for us.
4183          */
4184         ret = t4_get_pfres(adap);
4185         if (ret) {
4186                 dev_err(adap->pdev_dev,
4187                         "Unable to retrieve resource provisioning information\n");
4188                 return ret;
4189         }
4190
4191         /* get device capabilities */
4192         memset(c, 0, sizeof(*c));
4193         c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4194                                FW_CMD_REQUEST_F | FW_CMD_READ_F);
4195         c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4196         ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
4197         if (ret < 0)
4198                 return ret;
4199
4200         c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4201                                FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
4202         ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
4203         if (ret < 0)
4204                 return ret;
4205
4206         ret = t4_config_glbl_rss(adap, adap->pf,
4207                                  FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4208                                  FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
4209                                  FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
4210         if (ret < 0)
4211                 return ret;
4212
4213         ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
4214                           MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
4215                           FW_CMD_CAP_PF);
4216         if (ret < 0)
4217                 return ret;
4218
4219         t4_sge_init(adap);
4220
4221         /* tweak some settings */
4222         t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
4223         t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
4224         t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
4225         v = t4_read_reg(adap, TP_PIO_DATA_A);
4226         t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
4227
4228         /* first 4 Tx modulation queues point to consecutive Tx channels */
4229         adap->params.tp.tx_modq_map = 0xE4;
4230         t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
4231                      TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
4232
4233         /* associate each Tx modulation queue with consecutive Tx channels */
4234         v = 0x84218421;
4235         t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4236                           &v, 1, TP_TX_SCHED_HDR_A);
4237         t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4238                           &v, 1, TP_TX_SCHED_FIFO_A);
4239         t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
4240                           &v, 1, TP_TX_SCHED_PCMD_A);
4241
4242 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4243         if (is_offload(adap)) {
4244                 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
4245                              TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4246                              TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4247                              TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4248                              TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4249                 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
4250                              TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4251                              TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4252                              TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4253                              TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4254         }
4255
4256         /* get basic stuff going */
4257         return t4_early_init(adap, adap->pf);
4258 }
4259
4260 /*
4261  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
4262  */
4263 #define MAX_ATIDS 8192U
4264
4265 /*
4266  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4267  *
4268  * If the firmware we're dealing with has Configuration File support, then
4269  * we use that to perform all configuration.
4270  */
4271
4272 /*
4273  * Tweak configuration based on module parameters, etc.  Most of these have
4274  * defaults assigned to them by Firmware Configuration Files (if we're using
4275  * them) but need to be explicitly set if we're using hard-coded
4276  * initialization.  But even in the case of using Firmware Configuration
4277  * Files, we'd like to expose the ability to change these via module
4278  * parameters so these are essentially common tweaks/settings for
4279  * Configuration Files and hard-coded initialization ...
4280  */
4281 static int adap_init0_tweaks(struct adapter *adapter)
4282 {
4283         /*
4284          * Fix up various Host-Dependent Parameters like Page Size, Cache
4285          * Line Size, etc.  The firmware default is for a 4KB Page Size and
4286          * 64B Cache Line Size ...
4287          */
4288         t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4289
4290         /*
4291          * Process module parameters which affect early initialization.
4292          */
4293         if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4294                 dev_err(&adapter->pdev->dev,
4295                         "Ignoring illegal rx_dma_offset=%d, using 2\n",
4296                         rx_dma_offset);
4297                 rx_dma_offset = 2;
4298         }
4299         t4_set_reg_field(adapter, SGE_CONTROL_A,
4300                          PKTSHIFT_V(PKTSHIFT_M),
4301                          PKTSHIFT_V(rx_dma_offset));
4302
4303         /*
4304          * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4305          * adds the pseudo header itself.
4306          */
4307         t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
4308                                CSUM_HAS_PSEUDO_HDR_F, 0);
4309
4310         return 0;
4311 }
4312
4313 /* 10Gb/s-BT PHY Support.  Chip-external 10Gb/s-BT PHYs are complex chips
4314  * unto themselves and they contain their own firmware to perform their
4315  * tasks ...
4316  */
4317 static int phy_aq1202_version(const u8 *phy_fw_data,
4318                               size_t phy_fw_size)
4319 {
4320         int offset;
4321
4322         /* At offset 0x8 you're looking for the primary image's
4323          * starting offset which is 3 Bytes wide
4324          *
4325          * At offset 0xa of the primary image, you look for the offset
4326          * of the DRAM segment which is 3 Bytes wide.
4327          *
4328          * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
4329          * wide
4330          */
4331         #define be16(__p) (((__p)[0] << 8) | (__p)[1])
4332         #define le16(__p) ((__p)[0] | ((__p)[1] << 8))
4333         #define le24(__p) (le16(__p) | ((__p)[2] << 16))
4334
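        /* The helpers above assemble values byte-by-byte, so they work
         * regardless of host endianness or alignment; e.g. le24() over the
         * bytes {0x34, 0x12, 0x00} yields 0x001234.  The 24-bit value at
         * offset 0x8 appears to be stored in 4KB units, hence the << 12
         * below to turn it into a byte offset.
         */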
4335         offset = le24(phy_fw_data + 0x8) << 12;
4336         offset = le24(phy_fw_data + offset + 0xa);
4337         return be16(phy_fw_data + offset + 0x27e);
4338
4339         #undef be16
4340         #undef le16
4341         #undef le24
4342 }
4343
4344 static struct info_10gbt_phy_fw {
4345         unsigned int phy_fw_id;         /* PCI Device ID */
4346         char *phy_fw_file;              /* /lib/firmware/ PHY Firmware file */
4347         int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
4348         int phy_flash;                  /* Has FLASH for PHY Firmware */
4349 } phy_info_array[] = {
4350         {
4351                 PHY_AQ1202_DEVICEID,
4352                 PHY_AQ1202_FIRMWARE,
4353                 phy_aq1202_version,
4354                 1,
4355         },
4356         {
4357                 PHY_BCM84834_DEVICEID,
4358                 PHY_BCM84834_FIRMWARE,
4359                 NULL,
4360                 0,
4361         },
4362         { 0, NULL, NULL },
4363 };
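/* The all-zero entry above is a defensive terminator; find_phy_info() below
 * bounds its search with ARRAY_SIZE() and returns NULL on a miss, so the
 * terminator is never matched for a real PCI device ID.
 */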
4364
4365 static struct info_10gbt_phy_fw *find_phy_info(int devid)
4366 {
4367         int i;
4368
4369         for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
4370                 if (phy_info_array[i].phy_fw_id == devid)
4371                         return &phy_info_array[i];
4372         }
4373         return NULL;
4374 }
4375
4376 /* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
4377  * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
4378  * we return a negative error number.  If we transfer new firmware we return 1
4379  * (from t4_load_phy_fw()).  If we don't do anything we return 0.
4380  */
4381 static int adap_init0_phy(struct adapter *adap)
4382 {
4383         const struct firmware *phyf;
4384         int ret;
4385         struct info_10gbt_phy_fw *phy_info;
4386
4387         /* Use the device ID to determine which PHY file to flash.
4388          */
4389         phy_info = find_phy_info(adap->pdev->device);
4390         if (!phy_info) {
4391                 dev_warn(adap->pdev_dev,
4392                          "No PHY Firmware file found for this PHY\n");
4393                 return -EOPNOTSUPP;
4394         }
4395
4396         /* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
4397          * use that. The adapter firmware provides us with a memory buffer
4398          * where we can load a PHY firmware file from the host if we want to
4399          * override the PHY firmware file in flash.
4400          */
4401         ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
4402                                       adap->pdev_dev);
4403         if (ret < 0) {
4404                 /* For adapters without FLASH attached to PHY for their
4405                  * firmware, it's obviously a fatal error if we can't get the
4406                  * firmware to the adapter.  For adapters with PHY firmware
4407                  * FLASH storage, it's worth a warning if we can't find the
4408                  * PHY Firmware but we'll neuter the error ...
4409                  */
4410                 dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
4411                         "/lib/firmware/%s, error %d\n",
4412                         phy_info->phy_fw_file, -ret);
4413                 if (phy_info->phy_flash) {
4414                         int cur_phy_fw_ver = 0;
4415
4416                         t4_phy_fw_ver(adap, &cur_phy_fw_ver);
4417                         dev_warn(adap->pdev_dev, "continuing with, on-adapter "
4418                                  "FLASH copy, version %#x\n", cur_phy_fw_ver);
4419                         ret = 0;
4420                 }
4421
4422                 return ret;
4423         }
4424
4425         /* Load PHY Firmware onto adapter.
4426          */
4427         spin_lock_bh(&adap->win0_lock);
4428         ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
4429                              (u8 *)phyf->data, phyf->size);
4430         spin_unlock_bh(&adap->win0_lock);
4431         if (ret < 0)
4432                 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
4433                         -ret);
4434         else if (ret > 0) {
4435                 int new_phy_fw_ver = 0;
4436
4437                 if (phy_info->phy_fw_version)
4438                         new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
4439                                                                   phyf->size);
4440                 dev_info(adap->pdev_dev, "Successfully transferred PHY "
4441                          "Firmware /lib/firmware/%s, version %#x\n",
4442                          phy_info->phy_fw_file, new_phy_fw_ver);
4443         }
4444
4445         release_firmware(phyf);
4446
4447         return ret;
4448 }
4449
4450 /*
4451  * Attempt to initialize the adapter via a Firmware Configuration File.
4452  */
4453 static int adap_init0_config(struct adapter *adapter, int reset)
4454 {
4455         char *fw_config_file, fw_config_file_path[256];
4456         u32 finiver, finicsum, cfcsum, param, val;
4457         struct fw_caps_config_cmd caps_cmd;
4458         unsigned long mtype = 0, maddr = 0;
4459         const struct firmware *cf;
4460         char *config_name = NULL;
4461         int config_issued = 0;
4462         int ret;
4463
4464         /*
4465          * Reset device if necessary.
4466          */
4467         if (reset) {
4468                 ret = t4_fw_reset(adapter, adapter->mbox,
4469                                   PIORSTMODE_F | PIORST_F);
4470                 if (ret < 0)
4471                         goto bye;
4472         }
4473
4474         /* If this is a 10Gb/s-BT adapter make sure the chip-external
4475          * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
4476          * to be performed after any global adapter RESET above since some
4477          * PHYs only have local RAM copies of the PHY firmware.
4478          */
4479         if (is_10gbt_device(adapter->pdev->device)) {
4480                 ret = adap_init0_phy(adapter);
4481                 if (ret < 0)
4482                         goto bye;
4483         }
4484         /*
4485          * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4486          * then use that.  Otherwise, use the configuration file stored
4487          * in the adapter flash ...
4488          */
4489         switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4490         case CHELSIO_T4:
4491                 fw_config_file = FW4_CFNAME;
4492                 break;
4493         case CHELSIO_T5:
4494                 fw_config_file = FW5_CFNAME;
4495                 break;
4496         case CHELSIO_T6:
4497                 fw_config_file = FW6_CFNAME;
4498                 break;
4499         default:
4500                 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4501                        adapter->pdev->device);
4502                 ret = -EINVAL;
4503                 goto bye;
4504         }
4505
4506         ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4507         if (ret < 0) {
4508                 config_name = "On FLASH";
4509                 mtype = FW_MEMTYPE_CF_FLASH;
4510                 maddr = t4_flash_cfg_addr(adapter);
4511         } else {
4512                 u32 params[7], val[7];
4513
4514                 sprintf(fw_config_file_path,
4515                         "/lib/firmware/%s", fw_config_file);
4516                 config_name = fw_config_file_path;
4517
4518                 if (cf->size >= FLASH_CFG_MAX_SIZE)
4519                         ret = -ENOMEM;
4520                 else {
4521                         params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4522                              FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
4523                         ret = t4_query_params(adapter, adapter->mbox,
4524                                               adapter->pf, 0, 1, params, val);
4525                         if (ret == 0) {
4526                                 /*
4527                                  * For t4_memory_rw() below addresses and
4528                                  * sizes have to be in terms of multiples of 4
4529                                  * bytes.  So, if the Configuration File isn't
4530                                  * a multiple of 4 bytes in length we'll have
4531                                  * to write that out separately since we can't
4532                                  * guarantee that the bytes following the
4533                                  * residual byte in the buffer returned by
4534                                  * request_firmware() are zeroed out ...
4535                                  */
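                                /* E.g. a 103-byte Configuration File is
                                 * written as 100 bytes (25 words) below,
                                 * then one final word carrying the 3
                                 * residual bytes with its last byte zeroed.
                                 */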
4536                                 size_t resid = cf->size & 0x3;
4537                                 size_t size = cf->size & ~0x3;
4538                                 __be32 *data = (__be32 *)cf->data;
4539
4540                                 mtype = FW_PARAMS_PARAM_Y_G(val[0]);
4541                                 maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
4542
4543                                 spin_lock(&adapter->win0_lock);
4544                                 ret = t4_memory_rw(adapter, 0, mtype, maddr,
4545                                                    size, data, T4_MEMORY_WRITE);
4546                                 if (ret == 0 && resid != 0) {
4547                                         union {
4548                                                 __be32 word;
4549                                                 char buf[4];
4550                                         } last;
4551                                         int i;
4552
4553                                         last.word = data[size >> 2];
4554                                         for (i = resid; i < 4; i++)
4555                                                 last.buf[i] = 0;
4556                                         ret = t4_memory_rw(adapter, 0, mtype,
4557                                                            maddr + size,
4558                                                            4, &last.word,
4559                                                            T4_MEMORY_WRITE);
4560                                 }
4561                                 spin_unlock(&adapter->win0_lock);
4562                         }
4563                 }
4564
4565                 release_firmware(cf);
4566                 if (ret)
4567                         goto bye;
4568         }
4569
4570         val = 0;
4571
4572         /* Offload + hash filter is supported; older firmware will fail
4573          * this request, which is fine.
4574          */
4575         param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4576                  FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HASHFILTER_WITH_OFLD));
4577         ret = t4_set_params(adapter, adapter->mbox, adapter->pf, 0,
4578                             1, &param, &val);
4579
4580         /* If the FW doesn't know about hash filter + offload support,
4581          * that's not a problem; don't return an error.
4582          */
4583         if (ret < 0) {
4584                 dev_warn(adapter->pdev_dev,
4585                          "Hash filter with ofld is not supported by FW\n");
4586         }
4587
4588         /*
4589          * Issue a Capability Configuration command to the firmware to get it
4590          * to parse the Configuration File.  We don't use t4_fw_config_file()
4591          * because we want the ability to modify various features after we've
4592          * processed the configuration file ...
4593          */
4594         memset(&caps_cmd, 0, sizeof(caps_cmd));
4595         caps_cmd.op_to_write =
4596                 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4597                       FW_CMD_REQUEST_F |
4598                       FW_CMD_READ_F);
4599         caps_cmd.cfvalid_to_len16 =
4600                 htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
4601                       FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
4602                       FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
4603                       FW_LEN16(caps_cmd));
4604         ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4605                          &caps_cmd);
4606
4607         /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4608          * Configuration File in FLASH), our last gasp effort is to use the
4609          * Firmware Configuration File which is embedded in the firmware.  A
4610          * very few early versions of the firmware didn't have one embedded
4611          * but we can ignore those.
4612          */
4613         if (ret == -ENOENT) {
4614                 memset(&caps_cmd, 0, sizeof(caps_cmd));
4615                 caps_cmd.op_to_write =
4616                         htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4617                                         FW_CMD_REQUEST_F |
4618                                         FW_CMD_READ_F);
4619                 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4620                 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4621                                 sizeof(caps_cmd), &caps_cmd);
4622                 config_name = "Firmware Default";
4623         }
4624
4625         config_issued = 1;
4626         if (ret < 0)
4627                 goto bye;
4628
4629         finiver = ntohl(caps_cmd.finiver);
4630         finicsum = ntohl(caps_cmd.finicsum);
4631         cfcsum = ntohl(caps_cmd.cfcsum);
4632         if (finicsum != cfcsum)
4633                 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
4634                          "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4635                          finicsum, cfcsum);
4636
4637         /*
4638          * And now tell the firmware to use the configuration we just loaded.
4639          */
4640         caps_cmd.op_to_write =
4641                 htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
4642                       FW_CMD_REQUEST_F |
4643                       FW_CMD_WRITE_F);
4644         caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4645         ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4646                          NULL);
4647         if (ret < 0)
4648                 goto bye;
4649
4650         /*
4651          * Tweak configuration based on system architecture, module
4652          * parameters, etc.
4653          */
4654         ret = adap_init0_tweaks(adapter);
4655         if (ret < 0)
4656                 goto bye;
4657
4658         /* We will proceed even if HMA init fails. */
4659         ret = adap_config_hma(adapter);
4660         if (ret)
4661                 dev_err(adapter->pdev_dev,
4662                         "HMA configuration failed with error %d\n", ret);
4663
4664         if (is_t6(adapter->params.chip)) {
4665                 adap_config_hpfilter(adapter);
4666                 ret = setup_ppod_edram(adapter);
4667                 if (!ret)
4668                         dev_info(adapter->pdev_dev, "Successfully enabled "
4669                                  "ppod edram feature\n");
4670         }
4671
4672         /*
4673          * And finally tell the firmware to initialize itself using the
4674          * parameters from the Configuration File.
4675          */
4676         ret = t4_fw_initialize(adapter, adapter->mbox);
4677         if (ret < 0)
4678                 goto bye;
4679
4680         /* Emit Firmware Configuration File information and return
4681          * successfully.
4682          */
4683         dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
4684                  "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4685                  config_name, finiver, cfcsum);
4686         return 0;
4687
4688         /*
4689          * Something bad happened.  Return the error ...  (If the "error"
4690          * is that there's no Configuration File on the adapter we don't
4691          * want to issue a warning since this is fairly common.)
4692          */
4693 bye:
4694         if (config_issued && ret != -ENOENT)
4695                 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4696                          config_name, -ret);
4697         return ret;
4698 }
4699
4700 static struct fw_info fw_info_array[] = {
4701         {
4702                 .chip = CHELSIO_T4,
4703                 .fs_name = FW4_CFNAME,
4704                 .fw_mod_name = FW4_FNAME,
4705                 .fw_hdr = {
4706                         .chip = FW_HDR_CHIP_T4,
4707                         .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
4708                         .intfver_nic = FW_INTFVER(T4, NIC),
4709                         .intfver_vnic = FW_INTFVER(T4, VNIC),
4710                         .intfver_ri = FW_INTFVER(T4, RI),
4711                         .intfver_iscsi = FW_INTFVER(T4, ISCSI),
4712                         .intfver_fcoe = FW_INTFVER(T4, FCOE),
4713                 },
4714         }, {
4715                 .chip = CHELSIO_T5,
4716                 .fs_name = FW5_CFNAME,
4717                 .fw_mod_name = FW5_FNAME,
4718                 .fw_hdr = {
4719                         .chip = FW_HDR_CHIP_T5,
4720                         .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
4721                         .intfver_nic = FW_INTFVER(T5, NIC),
4722                         .intfver_vnic = FW_INTFVER(T5, VNIC),
4723                         .intfver_ri = FW_INTFVER(T5, RI),
4724                         .intfver_iscsi = FW_INTFVER(T5, ISCSI),
4725                         .intfver_fcoe = FW_INTFVER(T5, FCOE),
4726                 },
4727         }, {
4728                 .chip = CHELSIO_T6,
4729                 .fs_name = FW6_CFNAME,
4730                 .fw_mod_name = FW6_FNAME,
4731                 .fw_hdr = {
4732                         .chip = FW_HDR_CHIP_T6,
4733                         .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
4734                         .intfver_nic = FW_INTFVER(T6, NIC),
4735                         .intfver_vnic = FW_INTFVER(T6, VNIC),
4736                         .intfver_ofld = FW_INTFVER(T6, OFLD),
4737                         .intfver_ri = FW_INTFVER(T6, RI),
4738                         .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
4739                         .intfver_iscsi = FW_INTFVER(T6, ISCSI),
4740                         .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
4741                         .intfver_fcoe = FW_INTFVER(T6, FCOE),
4742                 },
4743         }
4745 };
4746
4747 static struct fw_info *find_fw_info(int chip)
4748 {
4749         int i;
4750
4751         for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
4752                 if (fw_info_array[i].chip == chip)
4753                         return &fw_info_array[i];
4754         }
4755         return NULL;
4756 }
4757
4758 /*
4759  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4760  */
4761 static int adap_init0(struct adapter *adap, int vpd_skip)
4762 {
4763         struct fw_caps_config_cmd caps_cmd;
4764         u32 params[7], val[7];
4765         enum dev_state state;
4766         u32 v, port_vec;
4767         int reset = 1;
4768         int ret;
4769
4770         /* Grab Firmware Device Log parameters as early as possible so we have
4771          * access to it for debugging, etc.
4772          */
4773         ret = t4_init_devlog_params(adap);
4774         if (ret < 0)
4775                 return ret;
4776
4777         /* Contact FW, advertising Master capability */
4778         ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
4779                           is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
4780         if (ret < 0) {
4781                 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
4782                         ret);
4783                 return ret;
4784         }
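        /* t4_fw_hello() returns the mailbox of the PF that won the Master
         * negotiation; if that is our own mailbox, we are the Master PF.
         */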
4785         if (ret == adap->mbox)
4786                 adap->flags |= CXGB4_MASTER_PF;
4787
4788         /*
4789          * If we're the Master PF Driver and the device is uninitialized,
4790          * then let's consider upgrading the firmware ...  (We always want
4791          * to check the firmware version number in order to A. get it for
4792          * later reporting and B. to warn if the currently loaded firmware
4793          * is excessively mismatched relative to the driver.)
4794          */
4795
4796         t4_get_version_info(adap);
4797         ret = t4_check_fw_version(adap);
4798         /* If firmware is too old (not supported by driver) force an update. */
4799         if (ret)
4800                 state = DEV_STATE_UNINIT;
4801         if ((adap->flags & CXGB4_MASTER_PF) && state != DEV_STATE_INIT) {
4802                 struct fw_info *fw_info;
4803                 struct fw_hdr *card_fw;
4804                 const struct firmware *fw;
4805                 const u8 *fw_data = NULL;
4806                 unsigned int fw_size = 0;
4807
4808                 /* This is the firmware whose headers the driver was compiled
4809                  * against
4810                  */
4811                 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
4812                 if (fw_info == NULL) {
4813                         dev_err(adap->pdev_dev,
4814                                 "unable to get firmware info for chip %d.\n",
4815                                 CHELSIO_CHIP_VERSION(adap->params.chip));
4816                         return -EINVAL;
4817                 }
4818
4819                 /* allocate memory to read the header of the firmware on the
4820                  * card
4821                  */
4822                 card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
4823                 if (!card_fw) {
4824                         ret = -ENOMEM;
4825                         goto bye;
4826                 }
4827
4828                 /* Get FW from /lib/firmware/ */
4829                 ret = request_firmware(&fw, fw_info->fw_mod_name,
4830                                        adap->pdev_dev);
4831                 if (ret < 0) {
4832                         dev_err(adap->pdev_dev,
4833                                 "unable to load firmware image %s, error %d\n",
4834                                 fw_info->fw_mod_name, ret);
4835                 } else {
4836                         fw_data = fw->data;
4837                         fw_size = fw->size;
4838                 }
4839
4840                 /* upgrade FW logic */
4841                 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
4842                                  state, &reset);
4843
4844                 /* Cleaning up */
4845                 release_firmware(fw);
4846                 kvfree(card_fw);
4847
4848                 if (ret < 0)
4849                         goto bye;
4850         }
4851
4852         /* If the firmware is initialized already, emit a simple note to that
4853          * effect. Otherwise, it's time to try initializing the adapter.
4854          */
4855         if (state == DEV_STATE_INIT) {
4856                 ret = adap_config_hma(adap);
4857                 if (ret)
4858                         dev_err(adap->pdev_dev,
4859                                 "HMA configuration failed with error %d\n",
4860                                 ret);
4861                 dev_info(adap->pdev_dev, "Coming up as %s: "\
4862                          "Adapter already initialized\n",
4863                          adap->flags & CXGB4_MASTER_PF ? "MASTER" : "SLAVE");
4864         } else {
4865                 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
4866                          "Initializing adapter\n");
4867
4868                 /* Find out whether we're dealing with a version of the
4869                  * firmware which has configuration file support.
4870                  */
4871                 params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4872                              FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
4873                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
4874                                       params, val);
4875
4876                 /* If the firmware doesn't support Configuration Files,
4877                  * return an error.
4878                  */
4879                 if (ret < 0) {
4880                         dev_err(adap->pdev_dev, "firmware doesn't support "
4881                                 "Firmware Configuration Files\n");
4882                         goto bye;
4883                 }
4884
4885                 /* The firmware provides us with a memory buffer where we can
4886                  * load a Configuration File from the host if we want to
4887                  * override the Configuration File in flash.
4888                  */
4889                 ret = adap_init0_config(adap, reset);
4890                 if (ret == -ENOENT) {
4891                         dev_err(adap->pdev_dev, "no Configuration File "
4892                                 "present on adapter.\n");
4893                         goto bye;
4894                 }
4895                 if (ret < 0) {
4896                         dev_err(adap->pdev_dev, "could not initialize "
4897                                 "adapter, error %d\n", -ret);
4898                         goto bye;
4899                 }
4900         }
4901
4902         /* Now that we've successfully configured and initialized the adapter
4903          * (or found it already initialized), we can ask the Firmware what
4904          * resources it has provisioned for us.
4905          */
4906         ret = t4_get_pfres(adap);
4907         if (ret) {
4908                 dev_err(adap->pdev_dev,
4909                         "Unable to retrieve resource provisioning information\n");
4910                 goto bye;
4911         }
4912
4913         /* Grab VPD parameters.  This should be done after we establish a
4914          * connection to the firmware since some of the VPD parameters
4915          * (notably the Core Clock frequency) are retrieved via requests to
4916          * the firmware.  On the other hand, we need these fairly early on
4917          * so we do this right after getting ahold of the firmware.
4918          *
4919          * We need to do this after initializing the adapter because someone
4920          * could have FLASHed a new VPD which won't be read by the firmware
4921          * until we do the RESET ...
4922          */
4923         if (!vpd_skip) {
4924                 ret = t4_get_vpd_params(adap, &adap->params.vpd);
4925                 if (ret < 0)
4926                         goto bye;
4927         }
4928
4929         /* Find out what ports are available to us.  Note that we need to do
4930          * this before calling adap_init0_no_config() since it needs nports
4931          * and portvec ...
4932          */
4933         v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4934             FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
4936         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
4937         if (ret < 0)
4938                 goto bye;
4939
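        /* port_vec is a bitmap of the ports available to this function, so
         * e.g. a value of 0x5 means ports 0 and 2 (hweight32(0x5) == 2).
         */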
4940         adap->params.nports = hweight32(port_vec);
4941         adap->params.portvec = port_vec;
4942
4943         /* Give the SGE code a chance to pull in anything that it needs ...
4944          * Note that this must be called after we retrieve our VPD parameters
4945          * in order to know how to convert core ticks to seconds, etc.
4946          */
4947         ret = t4_sge_init(adap);
4948         if (ret < 0)
4949                 goto bye;
4950
4951         /* Grab the SGE Doorbell Queue Timer values.  If successful, that
4952          * indicates that the Firmware and Hardware support this.
4953          */
4954         params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
4955                     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DBQ_TIMERTICK));
4956         ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
4957                               1, params, val);
4958
4959         if (!ret) {
4960                 adap->sge.dbqtimer_tick = val[0];
4961                 ret = t4_read_sge_dbqtimers(adap,
4962                                             ARRAY_SIZE(adap->sge.dbqtimer_val),
4963                                             adap->sge.dbqtimer_val);
4964         }
4965
4966         if (!ret)
4967                 adap->flags |= CXGB4_SGE_DBQ_TIMER;
4968
4969         if (is_bypass_device(adap->pdev->device))
4970                 adap->params.bypass = 1;
4971
4972         /*
4973          * Grab some of our basic fundamental operating parameters.
4974          */
4975         params[0] = FW_PARAM_PFVF(EQ_START);
4976         params[1] = FW_PARAM_PFVF(L2T_START);
4977         params[2] = FW_PARAM_PFVF(L2T_END);
4978         params[3] = FW_PARAM_PFVF(FILTER_START);
4979         params[4] = FW_PARAM_PFVF(FILTER_END);
4980         params[5] = FW_PARAM_PFVF(IQFLINT_START);
4981         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
4982         if (ret < 0)
4983                 goto bye;
4984         adap->sge.egr_start = val[0];
4985         adap->l2t_start = val[1];
4986         adap->l2t_end = val[2];
4987         adap->tids.ftid_base = val[3];
4988         adap->tids.nftids = val[4] - val[3] + 1;
4989         adap->sge.ingr_start = val[5];
4990
4991         if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
4992                 params[0] = FW_PARAM_PFVF(HPFILTER_START);
4993                 params[1] = FW_PARAM_PFVF(HPFILTER_END);
4994                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
4995                                       params, val);
4996                 if (ret < 0)
4997                         goto bye;
4998
4999                 adap->tids.hpftid_base = val[0];
5000                 adap->tids.nhpftids = val[1] - val[0] + 1;
5001
5002                 /* Read the raw mps entries. In T6, the last 2 tcam entries
5003                  * are reserved for raw mac addresses (rawf = 2, one per port).
5004                  */
5005                 params[0] = FW_PARAM_PFVF(RAWF_START);
5006                 params[1] = FW_PARAM_PFVF(RAWF_END);
5007                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5008                                       params, val);
5009                 if (ret == 0) {
5010                         adap->rawf_start = val[0];
5011                         adap->rawf_cnt = val[1] - val[0] + 1;
5012                 }
5013
5014                 adap->tids.tid_base =
5015                         t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
5016         }
5017
5018         /* qids (ingress/egress) returned from firmware can be anywhere
5019          * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
5020          * Hence the driver needs to allocate memory for this range in
5021          * order to store the queue info.  Get the highest IQFLINT/EQ
5022          * index returned by the FW_EQ_*_CMD.alloc command.
5023          */
5024         params[0] = FW_PARAM_PFVF(EQ_END);
5025         params[1] = FW_PARAM_PFVF(IQFLINT_END);
5026         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5027         if (ret < 0)
5028                 goto bye;
5029         adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
5030         adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
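        /* E.g. egr_start == 64 with EQ_END == 319 would yield egr_sz == 256
         * map entries below (the values themselves are illustrative only).
         */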
5031
5032         adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
5033                                     sizeof(*adap->sge.egr_map), GFP_KERNEL);
5034         if (!adap->sge.egr_map) {
5035                 ret = -ENOMEM;
5036                 goto bye;
5037         }
5038
5039         adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
5040                                      sizeof(*adap->sge.ingr_map), GFP_KERNEL);
5041         if (!adap->sge.ingr_map) {
5042                 ret = -ENOMEM;
5043                 goto bye;
5044         }
5045
5046         /* Allocate the memory for the various egress queue bitmaps,
5047          * i.e. starving_fl, txq_maperr and blocked_fl.
5048          */
5049         adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
5050                                         sizeof(long), GFP_KERNEL);
5051         if (!adap->sge.starving_fl) {
5052                 ret = -ENOMEM;
5053                 goto bye;
5054         }
5055
5056         adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
5057                                        sizeof(long), GFP_KERNEL);
5058         if (!adap->sge.txq_maperr) {
5059                 ret = -ENOMEM;
5060                 goto bye;
5061         }
5062
5063 #ifdef CONFIG_DEBUG_FS
5064         adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
5065                                        sizeof(long), GFP_KERNEL);
5066         if (!adap->sge.blocked_fl) {
5067                 ret = -ENOMEM;
5068                 goto bye;
5069         }
5070 #endif
5071
5072         params[0] = FW_PARAM_PFVF(CLIP_START);
5073         params[1] = FW_PARAM_PFVF(CLIP_END);
5074         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5075         if (ret < 0)
5076                 goto bye;
5077         adap->clipt_start = val[0];
5078         adap->clipt_end = val[1];
5079
5080         /* Get the supported number of traffic classes */
5081         params[0] = FW_PARAM_DEV(NUM_TM_CLASS);
5082         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
5083         if (ret < 0) {
5084                 /* We couldn't retrieve the number of Traffic Classes
5085                  * supported by the hardware/firmware. So we hard
5086                  * code it here.
5087                  */
5088                 adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;
5089         } else {
5090                 adap->params.nsched_cls = val[0];
5091         }
5092
5093         /* query params related to active filter region */
5094         params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5095         params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5096         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
5097         /* If Active filter size is set we enable establishing
5098          * offload connection through firmware work request
5099          */
5100         if (ret >= 0 && val[0] != val[1]) {
5101                 adap->flags |= CXGB4_FW_OFLD_CONN;
5102                 adap->tids.aftid_base = val[0];
5103                 adap->tids.aftid_end = val[1];
5104         }
5105
5106         /* If we're running on newer firmware, let it know that we're
5107          * prepared to deal with encapsulated CPL messages.  Older
5108          * firmware won't understand this and we'll just get
5109          * unencapsulated messages ...
5110          */
5111         params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5112         val[0] = 1;
5113         (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
5114
5115         /*
5116          * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5117          * capability.  Earlier versions of the firmware didn't have the
5118          * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5119          * permission to use ULPTX MEMWRITE DSGL.
5120          */
5121         if (is_t4(adap->params.chip)) {
5122                 adap->params.ulptx_memwrite_dsgl = false;
5123         } else {
5124                 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5125                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5126                                       1, params, val);
5127                 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5128         }
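        /* The idiom above recurs below: query an optional FW parameter and
         * treat any failure as "feature not supported" rather than as a
         * fatal error.
         */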
5129
5130         /* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
5131         params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
5132         ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5133                               1, params, val);
5134         adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);
5135
5136         /* See if FW supports FW_FILTER2 work request */
5137         if (is_t4(adap->params.chip)) {
5138                 adap->params.filter2_wr_support = false;
5139         } else {
5140                 params[0] = FW_PARAM_DEV(FILTER2_WR);
5141                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5142                                       1, params, val);
5143                 adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
5144         }
5145
5146         /* Check if FW supports returning vin and smt index.
5147          * If this is not supported, the driver will derive
5148          * these values from the VIID.
5149          */
5150         params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN);
5151         ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5152                               1, params, val);
5153         adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0);
5154
5155         /*
5156          * Get device capabilities so we can determine what resources we need
5157          * to manage.
5158          */
5159         memset(&caps_cmd, 0, sizeof(caps_cmd));
5160         caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
5161                                      FW_CMD_REQUEST_F | FW_CMD_READ_F);
5162         caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5163         ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5164                          &caps_cmd);
5165         if (ret < 0)
5166                 goto bye;
5167
5168         /* Hash filter support requires testing some mandatory register
5169          * settings, and that test depends on whether offload is enabled,
5170          * hence we check and set it here.
5171          */
5172         if (caps_cmd.ofldcaps)
5173                 adap->params.offload = 1;
5174
5175         if (caps_cmd.ofldcaps ||
5176             (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) ||
5177             (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD))) {
5178                 /* query offload-related parameters */
5179                 params[0] = FW_PARAM_DEV(NTID);
5180                 params[1] = FW_PARAM_PFVF(SERVER_START);
5181                 params[2] = FW_PARAM_PFVF(SERVER_END);
5182                 params[3] = FW_PARAM_PFVF(TDDP_START);
5183                 params[4] = FW_PARAM_PFVF(TDDP_END);
5184                 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5185                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
5186                                       params, val);
5187                 if (ret < 0)
5188                         goto bye;
5189                 adap->tids.ntids = val[0];
5190                 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
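                /* Active-open TIDs are capped at half the TID space or
                 * MAX_ATIDS (8192), whichever is smaller.
                 */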
5191                 adap->tids.stid_base = val[1];
5192                 adap->tids.nstids = val[2] - val[1] + 1;
5193                 /*
5194                  * Set up the server filter region.  Divide the available
5195                  * filter region into two parts: regular filters get 1/3rd
5196                  * and server filters get 2/3rd.  This is only enabled if
5197                  * the workaround path is enabled.
5198                  * 1. Regular filters.
5199                  * 2. Server filters: special filters used to redirect SYN
5200                  * packets to the offload queue.
5201                  */
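                /* Worked example: with nftids == 96, DIV_ROUND_UP(96, 3) is
                 * 32, so 32 IDs remain regular filters and the following 64
                 * become server filters.
                 */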
5202                 if (adap->flags & CXGB4_FW_OFLD_CONN && !is_bypass(adap)) {
5203                         adap->tids.sftid_base = adap->tids.ftid_base +
5204                                         DIV_ROUND_UP(adap->tids.nftids, 3);
5205                         adap->tids.nsftids = adap->tids.nftids -
5206                                          DIV_ROUND_UP(adap->tids.nftids, 3);
5207                         adap->tids.nftids = adap->tids.sftid_base -
5208                                                 adap->tids.ftid_base;
5209                 }
5210                 adap->vres.ddp.start = val[3];
5211                 adap->vres.ddp.size = val[4] - val[3] + 1;
5212                 adap->params.ofldq_wr_cred = val[5];
5213
5214                 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
5215                         init_hash_filter(adap);
5216                 } else {
5217                         adap->num_ofld_uld += 1;
5218                 }
5219
5220                 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_ETHOFLD)) {
5221                         params[0] = FW_PARAM_PFVF(ETHOFLD_START);
5222                         params[1] = FW_PARAM_PFVF(ETHOFLD_END);
5223                         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5224                                               params, val);
5225                         if (!ret) {
5226                                 adap->tids.eotid_base = val[0];
5227                                 adap->tids.neotids = min_t(u32, MAX_ATIDS,
5228                                                            val[1] - val[0] + 1);
5229                                 adap->params.ethofld = 1;
5230                         }
5231                 }
5232         }
5233         if (caps_cmd.rdmacaps) {
5234                 params[0] = FW_PARAM_PFVF(STAG_START);
5235                 params[1] = FW_PARAM_PFVF(STAG_END);
5236                 params[2] = FW_PARAM_PFVF(RQ_START);
5237                 params[3] = FW_PARAM_PFVF(RQ_END);
5238                 params[4] = FW_PARAM_PFVF(PBL_START);
5239                 params[5] = FW_PARAM_PFVF(PBL_END);
5240                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
5241                                       params, val);
5242                 if (ret < 0)
5243                         goto bye;
5244                 adap->vres.stag.start = val[0];
5245                 adap->vres.stag.size = val[1] - val[0] + 1;
5246                 adap->vres.rq.start = val[2];
5247                 adap->vres.rq.size = val[3] - val[2] + 1;
5248                 adap->vres.pbl.start = val[4];
5249                 adap->vres.pbl.size = val[5] - val[4] + 1;
5250
5251                 params[0] = FW_PARAM_PFVF(SRQ_START);
5252                 params[1] = FW_PARAM_PFVF(SRQ_END);
5253                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5254                                       params, val);
5255                 if (!ret) {
5256                         adap->vres.srq.start = val[0];
5257                         adap->vres.srq.size = val[1] - val[0] + 1;
5258                 }
5259                 if (adap->vres.srq.size) {
5260                         adap->srq = t4_init_srq(adap->vres.srq.size);
5261                         if (!adap->srq)
5262                                 dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
5263                 }
5264
5265                 params[0] = FW_PARAM_PFVF(SQRQ_START);
5266                 params[1] = FW_PARAM_PFVF(SQRQ_END);
5267                 params[2] = FW_PARAM_PFVF(CQ_START);
5268                 params[3] = FW_PARAM_PFVF(CQ_END);
5269                 params[4] = FW_PARAM_PFVF(OCQ_START);
5270                 params[5] = FW_PARAM_PFVF(OCQ_END);
5271                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
5272                                       val);
5273                 if (ret < 0)
5274                         goto bye;
5275                 adap->vres.qp.start = val[0];
5276                 adap->vres.qp.size = val[1] - val[0] + 1;
5277                 adap->vres.cq.start = val[2];
5278                 adap->vres.cq.size = val[3] - val[2] + 1;
5279                 adap->vres.ocq.start = val[4];
5280                 adap->vres.ocq.size = val[5] - val[4] + 1;
5281
5282                 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
5283                 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
5284                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
5285                                       val);
5286                 if (ret < 0) {
5287                         adap->params.max_ordird_qp = 8;
5288                         adap->params.max_ird_adapter = 32 * adap->tids.ntids;
5289                         ret = 0;
5290                 } else {
5291                         adap->params.max_ordird_qp = val[0];
5292                         adap->params.max_ird_adapter = val[1];
5293                 }
5294                 dev_info(adap->pdev_dev,
5295                          "max_ordird_qp %d max_ird_adapter %d\n",
5296                          adap->params.max_ordird_qp,
5297                          adap->params.max_ird_adapter);
5298
5299                 /* Enable write_with_immediate if FW supports it */
5300                 params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
5301                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
5302                                       val);
5303                 adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);
5304
5305                 /* Enable write_cmpl if FW supports it */
5306                 params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
5307                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
5308                                       val);
5309                 adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
5310                 adap->num_ofld_uld += 2;
5311         }
5312         if (caps_cmd.iscsicaps) {
5313                 params[0] = FW_PARAM_PFVF(ISCSI_START);
5314                 params[1] = FW_PARAM_PFVF(ISCSI_END);
5315                 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5316                                       params, val);
5317                 if (ret < 0)
5318                         goto bye;
5319                 adap->vres.iscsi.start = val[0];
5320                 adap->vres.iscsi.size = val[1] - val[0] + 1;
5321                 if (is_t6(adap->params.chip)) {
5322                         params[0] = FW_PARAM_PFVF(PPOD_EDRAM_START);
5323                         params[1] = FW_PARAM_PFVF(PPOD_EDRAM_END);
5324                         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
5325                                               params, val);
5326                         if (!ret) {
5327                                 adap->vres.ppod_edram.start = val[0];
5328                                 adap->vres.ppod_edram.size =
5329                                         val[1] - val[0] + 1;
5330
5331                                 dev_info(adap->pdev_dev,
5332                                          "ppod edram start 0x%x end 0x%x size 0x%x\n",
5333                                          val[0], val[1],
5334                                          adap->vres.ppod_edram.size);
5335                         }
5336                 }
5337                 /* LIO target and cxgb4i initiator */
5338                 adap->num_ofld_uld += 2;
5339         }
5340         if (caps_cmd.cryptocaps) {
5341                 if (ntohs(caps_cmd.cryptocaps) &
5342                     FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
5343                         params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
5344                         ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5345                                               1, params, val);
5346                         if (ret < 0) {
5347                                 if (ret != -EINVAL)
5348                                         goto bye;
5349                         } else {
5350                                 adap->vres.ncrypto_fc = val[0];
5351                         }
5352                         adap->num_ofld_uld += 1;
5353                 }
5354                 if (ntohs(caps_cmd.cryptocaps) &
5355                     FW_CAPS_CONFIG_TLS_INLINE) {
5356                         params[0] = FW_PARAM_PFVF(TLS_START);
5357                         params[1] = FW_PARAM_PFVF(TLS_END);
5358                         ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
5359                                               2, params, val);
5360                         if (ret < 0)
5361                                 goto bye;
5362                         adap->vres.key.start = val[0];
5363                         adap->vres.key.size = val[1] - val[0] + 1;
5364                         adap->num_uld += 1;
5365                 }
5366                 adap->params.crypto = ntohs(caps_cmd.cryptocaps);
5367         }
5368
5369         /* The MTU/MSS Table is initialized by now, so load its values.  If
5370          * we're initializing the adapter, then we'll make any modifications
5371          * we want to the MTU/MSS Table and also initialize the congestion
5372          * parameters.
5373          */
5374         t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5375         if (state != DEV_STATE_INIT) {
5376                 int i;
5377
5378                 /* The default MTU Table contains values 1492 and 1500.
5379                  * However, for TCP, it's better to have two values which are
5380                  * a multiple of 8 +/- 4 bytes apart near this popular MTU.
5381                  * This allows us to have a TCP Data Payload which is a
5382                  * multiple of 8 regardless of what combination of TCP Options
5383                  * are in use (always a multiple of 4 bytes) which is
5384                  * important for performance reasons.  For instance, if no
5385                  * options are in use, then we have a 20-byte IP header and a
5386                  * 20-byte TCP header.  In this case, a 1500-byte MSS would
5387                  * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
5388                  * which is not a multiple of 8.  So using an MSS of 1488 in
5389                  * this case results in a TCP Data Payload of 1448 bytes which
5390                  * is a multiple of 8.  On the other hand, if 12-byte TCP Time
5391                  * Stamps have been negotiated, then an MTU of 1500 bytes
5392                  * results in a TCP Data Payload of 1448 bytes which, as
5393                  * above, is a multiple of 8 bytes ...
5394                  */
5395                 for (i = 0; i < NMTUS; i++)
5396                         if (adap->params.mtus[i] == 1492) {
5397                                 adap->params.mtus[i] = 1488;
5398                                 break;
5399                         }
5400
5401                 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5402                              adap->params.b_wnd);
5403         }
5404         t4_init_sge_params(adap);
5405         adap->flags |= CXGB4_FW_OK;
5406         t4_init_tp_params(adap, true);
5407         return 0;
5408
5409         /*
5410          * Something bad happened.  If a command timed out or failed with
5411          * EIO, the FW is not operating within its spec or something
5412          * catastrophic happened to the HW/FW; stop issuing commands.
5413          */
5414 bye:
5415         adap_free_hma_mem(adap);
5416         kfree(adap->sge.egr_map);
5417         kfree(adap->sge.ingr_map);
5418         kfree(adap->sge.starving_fl);
5419         kfree(adap->sge.txq_maperr);
5420 #ifdef CONFIG_DEBUG_FS
5421         kfree(adap->sge.blocked_fl);
5422 #endif
5423         if (ret != -ETIMEDOUT && ret != -EIO)
5424                 t4_fw_bye(adap, adap->mbox);
5425         return ret;
5426 }
5427
5428 /* EEH callbacks */
5429
5430 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5431                                          pci_channel_state_t state)
5432 {
5433         int i;
5434         struct adapter *adap = pci_get_drvdata(pdev);
5435
5436         if (!adap)
5437                 goto out;
5438
5439         rtnl_lock();
5440         adap->flags &= ~CXGB4_FW_OK;
5441         notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5442         spin_lock(&adap->stats_lock);
5443         for_each_port(adap, i) {
5444                 struct net_device *dev = adap->port[i];
5445                 if (dev) {
5446                         netif_device_detach(dev);
5447                         netif_carrier_off(dev);
5448                 }
5449         }
5450         spin_unlock(&adap->stats_lock);
5451         disable_interrupts(adap);
5452         if (adap->flags & CXGB4_FULL_INIT_DONE)
5453                 cxgb_down(adap);
5454         rtnl_unlock();
5455         if ((adap->flags & CXGB4_DEV_ENABLED)) {
5456                 pci_disable_device(pdev);
5457                 adap->flags &= ~CXGB4_DEV_ENABLED;
5458         }
5459 out:    return state == pci_channel_io_perm_failure ?
5460                 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5461 }
5462
5463 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5464 {
5465         int i, ret;
5466         struct fw_caps_config_cmd c;
5467         struct adapter *adap = pci_get_drvdata(pdev);
5468
5469         if (!adap) {
5470                 pci_restore_state(pdev);
5471                 pci_save_state(pdev);
5472                 return PCI_ERS_RESULT_RECOVERED;
5473         }
5474
5475         if (!(adap->flags & CXGB4_DEV_ENABLED)) {
5476                 if (pci_enable_device(pdev)) {
5477                         dev_err(&pdev->dev, "Cannot reenable PCI "
5478                                             "device after reset\n");
5479                         return PCI_ERS_RESULT_DISCONNECT;
5480                 }
5481                 adap->flags |= CXGB4_DEV_ENABLED;
5482         }
5483
5484         pci_set_master(pdev);
5485         pci_restore_state(pdev);
5486         pci_save_state(pdev);
5487
5488         if (t4_wait_dev_ready(adap->regs) < 0)
5489                 return PCI_ERS_RESULT_DISCONNECT;
5490         if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
5491                 return PCI_ERS_RESULT_DISCONNECT;
5492         adap->flags |= CXGB4_FW_OK;
5493         if (adap_init1(adap, &c))
5494                 return PCI_ERS_RESULT_DISCONNECT;
5495
5496         for_each_port(adap, i) {
5497                 struct port_info *pi = adap2pinfo(adap, i);
5498                 u8 vivld = 0, vin = 0;
5499
5500                 ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1,
5501                                   NULL, NULL, &vivld, &vin);
5502                 if (ret < 0)
5503                         return PCI_ERS_RESULT_DISCONNECT;
5504                 pi->viid = ret;
5505                 pi->xact_addr_filt = -1;
5506                 /* If fw supports returning the VIN as part of FW_VI_CMD,
5507                  * save the returned values.
5508                  */
5509                 if (adap->params.viid_smt_extn_support) {
5510                         pi->vivld = vivld;
5511                         pi->vin = vin;
5512                 } else {
5513                         /* Retrieve the values from VIID */
5514                         pi->vivld = FW_VIID_VIVLD_G(pi->viid);
5515                         pi->vin = FW_VIID_VIN_G(pi->viid);
5516                 }
5517         }
5518
5519         t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5520                      adap->params.b_wnd);
5521         setup_memwin(adap);
5522         if (cxgb_up(adap))
5523                 return PCI_ERS_RESULT_DISCONNECT;
5524         return PCI_ERS_RESULT_RECOVERED;
5525 }
5526
5527 static void eeh_resume(struct pci_dev *pdev)
5528 {
5529         int i;
5530         struct adapter *adap = pci_get_drvdata(pdev);
5531
5532         if (!adap)
5533                 return;
5534
5535         rtnl_lock();
5536         for_each_port(adap, i) {
5537                 struct net_device *dev = adap->port[i];
5538                 if (dev) {
5539                         if (netif_running(dev)) {
5540                                 link_start(dev);
5541                                 cxgb_set_rxmode(dev);
5542                         }
5543                         netif_device_attach(dev);
5544                 }
5545         }
5546         rtnl_unlock();
5547 }
5548
5549 static void eeh_reset_prepare(struct pci_dev *pdev)
5550 {
5551         struct adapter *adapter = pci_get_drvdata(pdev);
5552         int i;
5553
5554         if (adapter->pf != 4)
5555                 return;
5556
5557         adapter->flags &= ~CXGB4_FW_OK;
5558
5559         notify_ulds(adapter, CXGB4_STATE_DOWN);
5560
5561         for_each_port(adapter, i)
5562                 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5563                         cxgb_close(adapter->port[i]);
5564
5565         disable_interrupts(adapter);
5566         cxgb4_free_mps_ref_entries(adapter);
5567
5568         adap_free_hma_mem(adapter);
5569
5570         if (adapter->flags & CXGB4_FULL_INIT_DONE)
5571                 cxgb_down(adapter);
5572 }
5573
5574 static void eeh_reset_done(struct pci_dev *pdev)
5575 {
5576         struct adapter *adapter = pci_get_drvdata(pdev);
5577         int err, i;
5578
5579         if (adapter->pf != 4)
5580                 return;
5581
5582         err = t4_wait_dev_ready(adapter->regs);
5583         if (err < 0) {
5584                 dev_err(adapter->pdev_dev,
5585                         "Device not ready, err %d\n", err);
5586                 return;
5587         }
5588
5589         setup_memwin(adapter);
5590
5591         err = adap_init0(adapter, 1);
5592         if (err) {
5593                 dev_err(adapter->pdev_dev,
5594                         "Adapter init failed, err %d\n", err);
5595                 return;
5596         }
5597
5598         setup_memwin_rdma(adapter);
5599
5600         if (adapter->flags & CXGB4_FW_OK) {
5601                 err = t4_port_init(adapter, adapter->pf, adapter->pf, 0);
5602                 if (err) {
5603                         dev_err(adapter->pdev_dev,
5604                                 "Port init failed, err %d\n", err);
5605                         return;
5606                 }
5607         }
5608
5609         err = cfg_queues(adapter);
5610         if (err) {
5611                 dev_err(adapter->pdev_dev,
5612                         "Config queues failed, err %d\n", err);
5613                 return;
5614         }
5615
5616         cxgb4_init_mps_ref_entries(adapter);
5617
5618         err = setup_fw_sge_queues(adapter);
5619         if (err) {
5620                 dev_err(adapter->pdev_dev,
5621                         "FW sge queue allocation failed, err %d\n", err);
5622                 return;
5623         }
5624
5625         for_each_port(adapter, i)
5626                 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
5627                         cxgb_open(adapter->port[i]);
5628 }
5629
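/* EEH recovery sequence (see Documentation/PCI/pci-error-recovery.rst):
 * the core calls .error_detected() when a PCI error is reported, then
 * .slot_reset() after the link has been reset, and finally .resume()
 * once traffic may restart.  .reset_prepare()/.reset_done() instead
 * bracket resets requested through the PCI core (e.g. FLR).
 */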
5630 static const struct pci_error_handlers cxgb4_eeh = {
5631         .error_detected = eeh_err_detected,
5632         .slot_reset     = eeh_slot_reset,
5633         .resume         = eeh_resume,
5634         .reset_prepare  = eeh_reset_prepare,
5635         .reset_done     = eeh_reset_done,
5636 };
5637
5638 /* Return true if the Link Configuration supports "High Speeds" (those greater
5639  * than 1Gb/s).
5640  */
5641 static inline bool is_x_10g_port(const struct link_config *lc)
5642 {
5643         unsigned int speeds, high_speeds;
5644
5645         speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
5646         high_speeds = speeds &
5647                         ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
5648
5649         return high_speeds != 0;
5650 }
5651
5652 /* Perform default configuration of DMA queues depending on the number and type
5653  * of ports we found and the number of available CPUs.  Most settings can be
5654  * modified by the admin prior to actual use.
5655  */
5656 static int cfg_queues(struct adapter *adap)
5657 {
5658         u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
5659         u32 ncpus = num_online_cpus();
5660         u32 niqflint, neq, num_ulds;
5661         struct sge *s = &adap->sge;
5662         u32 i, n10g = 0, qidx = 0;
5663         u32 q10g = 0, q1g;
5664
5665         /* Reduce memory usage in a kdump environment by disabling all offload. */
5666         if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
5667                 adap->params.offload = 0;
5668                 adap->params.crypto = 0;
5669                 adap->params.ethofld = 0;
5670         }
5671
5672         /* Calculate the number of Ethernet Queue Sets available based on
5673          * resources provisioned for us.  We always have an Asynchronous
5674          * Firmware Event Ingress Queue.  If we're operating in MSI or Legacy
5675          * IRQ Pin Interrupt mode, then we'll also have a Forwarded Interrupt
5676          * Ingress Queue.  Meanwhile, we need two Egress Queues for each
5677          * Queue Set: one for the Free List and one for the Ethernet TX Queue.
5678          *
5679          * Note that we should also take into account all of the various
5680          * Offload Queues.  But, in any situation where we're operating in
5681          * a Resource Constrained Provisioning environment, doing any Offload
5682          * at all is problematic ...
5683          */
5684         niqflint = adap->params.pfres.niqflint - 1;
5685         if (!(adap->flags & CXGB4_USING_MSIX))
5686                 niqflint--;
5687         neq = adap->params.pfres.neq / 2;
5688         avail_qsets = min(niqflint, neq);
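        /* Illustrative arithmetic (hypothetical provisioning): with
         * pfres.niqflint = 66 and pfres.neq = 130 in MSI-X mode, we keep
         * niqflint = 65 ingress queues after reserving the Firmware Event
         * Queue, and neq = 130 / 2 = 65 Free List/TX queue pairs, so
         * avail_qsets = min(65, 65) = 65.
         */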
5689
5690         if (avail_qsets < adap->params.nports) {
5691                 dev_err(adap->pdev_dev, "avail_qsets=%d < nports=%d\n",
5692                         avail_qsets, adap->params.nports);
5693                 return -ENOMEM;
5694         }
5695
5696         /* Count the number of 10Gb/s or better ports */
5697         for_each_port(adap, i)
5698                 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
5699
5700         avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
5701
5702         /* We default to 1 queue per non-10G port and up to as many
5703          * queues as there are CPU cores per 10G port.
5704          */
5705         if (n10g)
5706                 q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
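        /* Worked example (hypothetical port mix): with 4 ports of which
         * n10g = 2 are high speed and avail_eth_qsets = 34, the two 1G
         * ports reserve one Queue Set each, leaving
         * q10g = (34 - 2) / 2 = 16 Queue Sets per high-speed port before
         * any capping below.
         */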
5707
5708 #ifdef CONFIG_CHELSIO_T4_DCB
5709         /* For Data Center Bridging support we need to be able to support up
5710          * to 8 Traffic Priorities; each of which will be assigned to its
5711          * own TX Queue in order to prevent Head-Of-Line Blocking.
5712          */
5713         q1g = 8;
5714         if (adap->params.nports * 8 > avail_eth_qsets) {
5715                 dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
5716                         avail_eth_qsets, adap->params.nports * 8);
5717                 return -ENOMEM;
5718         }
5719
5720         if (adap->params.nports * ncpus < avail_eth_qsets)
5721                 q10g = max(8U, ncpus);
5722         else
5723                 q10g = max(8U, q10g);
5724
5725         while ((q10g * n10g) >
5726                (avail_eth_qsets - (adap->params.nports - n10g) * q1g))
5727                 q10g--;
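        /* The loop above trims q10g until the DCB allocation fits:
         * n10g * q10g + (nports - n10g) * q1g <= avail_eth_qsets.
         */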
5728
5729 #else /* !CONFIG_CHELSIO_T4_DCB */
5730         q1g = 1;
5731         q10g = min(q10g, ncpus);
5732 #endif /* !CONFIG_CHELSIO_T4_DCB */
5733         if (is_kdump_kernel()) {
5734                 q10g = 1;
5735                 q1g = 1;
5736         }
5737
5738         for_each_port(adap, i) {
5739                 struct port_info *pi = adap2pinfo(adap, i);
5740
5741                 pi->first_qset = qidx;
5742                 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
5743                 qidx += pi->nqsets;
5744         }
5745
5746         s->ethqsets = qidx;
5747         s->max_ethqsets = qidx;   /* MSI-X may lower it later */
5748         avail_qsets -= qidx;
5749
5750         if (is_uld(adap)) {
5751                 /* For offload we use one queue per channel if all ports are
5752                  * up to 1G; otherwise we divide all available queues amongst
5753                  * the channels, capped by the number of available cores.
5754                  */
5755                 num_ulds = adap->num_uld + adap->num_ofld_uld;
5756                 i = min_t(u32, MAX_OFLD_QSETS, ncpus);
5757                 avail_uld_qsets = roundup(i, adap->params.nports);
5758                 if (avail_qsets < num_ulds * adap->params.nports) {
5759                         adap->params.offload = 0;
5760                         adap->params.crypto = 0;
5761                         s->ofldqsets = 0;
5762                 } else if (avail_qsets < num_ulds * avail_uld_qsets || !n10g) {
5763                         s->ofldqsets = adap->params.nports;
5764                 } else {
5765                         s->ofldqsets = avail_uld_qsets;
5766                 }
5767
5768                 avail_qsets -= num_ulds * s->ofldqsets;
5769         }
5770
5771         /* ETHOFLD Queues used for QoS offload should follow same
5772          * allocation scheme as normal Ethernet Queues.
5773          */
5774         if (is_ethofld(adap)) {
5775                 if (avail_qsets < s->max_ethqsets) {
5776                         adap->params.ethofld = 0;
5777                         s->eoqsets = 0;
5778                 } else {
5779                         s->eoqsets = s->max_ethqsets;
5780                 }
5781                 avail_qsets -= s->eoqsets;
5782         }
5783
5784         /* Mirror queues must follow same scheme as normal Ethernet
5785          * Queues, when there are enough queues available. Otherwise,
5786          * allocate at least 1 queue per port. If even 1 queue is not
5787          * available, then disable mirror queues support.
5788          */
5789         if (avail_qsets >= s->max_ethqsets)
5790                 s->mirrorqsets = s->max_ethqsets;
5791         else if (avail_qsets >= adap->params.nports)
5792                 s->mirrorqsets = adap->params.nports;
5793         else
5794                 s->mirrorqsets = 0;
5795         avail_qsets -= s->mirrorqsets;
5796
5797         for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
5798                 struct sge_eth_rxq *r = &s->ethrxq[i];
5799
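                /* Assuming init_rspq(adap, q, us, cnt, size, iqe_size):
                 * a 5 us holdoff timer, a 10-packet count threshold, and
                 * 1024 response-queue entries of 64 bytes each.
                 */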
5800                 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
5801                 r->fl.size = 72;
5802         }
5803
5804         for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
5805                 s->ethtxq[i].q.size = 1024;
5806
5807         for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
5808                 s->ctrlq[i].q.size = 512;
5809
5810         if (!is_t4(adap->params.chip))
5811                 s->ptptxq.q.size = 8;
5812
5813         init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
5814         init_rspq(adap, &s->intrq, 0, 1, 512, 64);
5815
5816         return 0;
5817 }
5818
5819 /*
5820  * Reduce the number of Ethernet queues across all ports to at most n.
5821  * The caller guarantees n allows at least one queue per port.
5822  */
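/* For instance (hypothetical counts): with 4 ports at 8 Queue Sets each
 * and n = 16, the round-robin loop below trims one Queue Set per port
 * per pass until the ports hold 4/4/4/4.
 */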
5823 static void reduce_ethqs(struct adapter *adap, int n)
5824 {
5825         int i;
5826         struct port_info *pi;
5827
5828         while (n < adap->sge.ethqsets)
5829                 for_each_port(adap, i) {
5830                         pi = adap2pinfo(adap, i);
5831                         if (pi->nqsets > 1) {
5832                                 pi->nqsets--;
5833                                 adap->sge.ethqsets--;
5834                                 if (adap->sge.ethqsets <= n)
5835                                         break;
5836                         }
5837                 }
5838
5839         n = 0;
5840         for_each_port(adap, i) {
5841                 pi = adap2pinfo(adap, i);
5842                 pi->first_qset = n;
5843                 n += pi->nqsets;
5844         }
5845 }
5846
5847 static int alloc_msix_info(struct adapter *adap, u32 num_vec)
5848 {
5849         struct msix_info *msix_info;
5850
5851         msix_info = kcalloc(num_vec, sizeof(*msix_info), GFP_KERNEL);
5852         if (!msix_info)
5853                 return -ENOMEM;
5854
5855         adap->msix_bmap.msix_bmap = kcalloc(BITS_TO_LONGS(num_vec),
5856                                             sizeof(long), GFP_KERNEL);
5857         if (!adap->msix_bmap.msix_bmap) {
5858                 kfree(msix_info);
5859                 return -ENOMEM;
5860         }
5861
5862         spin_lock_init(&adap->msix_bmap.lock);
5863         adap->msix_bmap.mapsize = num_vec;
5864
5865         adap->msix_info = msix_info;
5866         return 0;
5867 }
5868
5869 static void free_msix_info(struct adapter *adap)
5870 {
5871         kfree(adap->msix_bmap.msix_bmap);
5872         kfree(adap->msix_info);
5873 }
5874
5875 int cxgb4_get_msix_idx_from_bmap(struct adapter *adap)
5876 {
5877         struct msix_bmap *bmap = &adap->msix_bmap;
5878         unsigned int msix_idx;
5879         unsigned long flags;
5880
5881         spin_lock_irqsave(&bmap->lock, flags);
5882         msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize);
5883         if (msix_idx < bmap->mapsize) {
5884                 __set_bit(msix_idx, bmap->msix_bmap);
5885         } else {
5886                 spin_unlock_irqrestore(&bmap->lock, flags);
5887                 return -ENOSPC;
5888         }
5889
5890         spin_unlock_irqrestore(&bmap->lock, flags);
5891         return msix_idx;
5892 }
5893
5894 void cxgb4_free_msix_idx_in_bmap(struct adapter *adap,
5895                                  unsigned int msix_idx)
5896 {
5897         struct msix_bmap *bmap = &adap->msix_bmap;
5898         unsigned long flags;
5899
5900         spin_lock_irqsave(&bmap->lock, flags);
5901         __clear_bit(msix_idx, bmap->msix_bmap);
5902         spin_unlock_irqrestore(&bmap->lock, flags);
5903 }
5904
5905 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
5906 #define EXTRA_VECS 2
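/* enable_msix() below asks PCI for any vector count in [need, want]:
 * "want" is the ideal count covering every configured Queue Set, and
 * "need" is the minimum; if even that fails, offload is disabled and a
 * smaller NIC-only range is tried before giving up on MSI-X entirely.
 */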
5907
5908 static int enable_msix(struct adapter *adap)
5909 {
5910         u32 eth_need, uld_need = 0, ethofld_need = 0, mirror_need = 0;
5911         u32 ethqsets = 0, ofldqsets = 0, eoqsets = 0, mirrorqsets = 0;
5912         u8 num_uld = 0, nchan = adap->params.nports;
5913         u32 i, want, need, num_vec;
5914         struct sge *s = &adap->sge;
5915         struct msix_entry *entries;
5916         struct port_info *pi;
5917         int allocated, ret;
5918
5919         want = s->max_ethqsets;
5920 #ifdef CONFIG_CHELSIO_T4_DCB
5921         /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
5922          * each port.
5923          */
5924         need = 8 * nchan;
5925 #else
5926         need = nchan;
5927 #endif
5928         eth_need = need;
5929         if (is_uld(adap)) {
5930                 num_uld = adap->num_ofld_uld + adap->num_uld;
5931                 want += num_uld * s->ofldqsets;
5932                 uld_need = num_uld * nchan;
5933                 need += uld_need;
5934         }
5935
5936         if (is_ethofld(adap)) {
5937                 want += s->eoqsets;
5938                 ethofld_need = eth_need;
5939                 need += ethofld_need;
5940         }
5941
5942         if (s->mirrorqsets) {
5943                 want += s->mirrorqsets;
5944                 mirror_need = nchan;
5945                 need += mirror_need;
5946         }
5947
5948         want += EXTRA_VECS;
5949         need += EXTRA_VECS;
5950
5951         entries = kmalloc_array(want, sizeof(*entries), GFP_KERNEL);
5952         if (!entries)
5953                 return -ENOMEM;
5954
5955         for (i = 0; i < want; i++)
5956                 entries[i].entry = i;
5957
5958         allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
5959         if (allocated < 0) {
5960                 /* Disable offload and attempt to get vectors for
5961                  * NIC-only mode.
5962                  */
5963                 want = s->max_ethqsets + EXTRA_VECS;
5964                 need = eth_need + EXTRA_VECS;
5965                 allocated = pci_enable_msix_range(adap->pdev, entries,
5966                                                   need, want);
5967                 if (allocated < 0) {
5968                         dev_info(adap->pdev_dev,
5969                                  "Disabling MSI-X due to insufficient MSI-X vectors\n");
5970                         ret = allocated;
5971                         goto out_free;
5972                 }
5973
5974                 dev_info(adap->pdev_dev,
5975                          "Disabling offload due to insufficient MSI-X vectors\n");
5976                 adap->params.offload = 0;
5977                 adap->params.crypto = 0;
5978                 adap->params.ethofld = 0;
5979                 s->ofldqsets = 0;
5980                 s->eoqsets = 0;
5981                 s->mirrorqsets = 0;
5982                 uld_need = 0;
5983                 ethofld_need = 0;
5984                 mirror_need = 0;
5985         }
5986
5987         num_vec = allocated;
5988         if (num_vec < want) {
5989                 /* Distribute available vectors to the various queue groups.
5990                  * Every group gets its minimum requirement and NIC gets top
5991                  * priority for leftovers.
5992                  */
5993                 ethqsets = eth_need;
5994                 if (is_uld(adap))
5995                         ofldqsets = nchan;
5996                 if (is_ethofld(adap))
5997                         eoqsets = ethofld_need;
5998                 if (s->mirrorqsets)
5999                         mirrorqsets = mirror_need;
6000
6001                 num_vec -= need;
6002                 while (num_vec) {
6003                         if (num_vec < eth_need + ethofld_need ||
6004                             ethqsets > s->max_ethqsets)
6005                                 break;
6006
6007                         for_each_port(adap, i) {
6008                                 pi = adap2pinfo(adap, i);
6009                                 if (pi->nqsets < 2)
6010                                         continue;
6011
6012                                 ethqsets++;
6013                                 num_vec--;
6014                                 if (ethofld_need) {
6015                                         eoqsets++;
6016                                         num_vec--;
6017                                 }
6018                         }
6019                 }
6020
6021                 if (is_uld(adap)) {
6022                         while (num_vec) {
6023                                 if (num_vec < uld_need ||
6024                                     ofldqsets > s->ofldqsets)
6025                                         break;
6026
6027                                 ofldqsets++;
6028                                 num_vec -= uld_need;
6029                         }
6030                 }
6031
6032                 if (s->mirrorqsets) {
6033                         while (num_vec) {
6034                                 if (num_vec < mirror_need ||
6035                                     mirrorqsets > s->mirrorqsets)
6036                                         break;
6037
6038                                 mirrorqsets++;
6039                                 num_vec -= mirror_need;
6040                         }
6041                 }
6042         } else {
6043                 ethqsets = s->max_ethqsets;
6044                 if (is_uld(adap))
6045                         ofldqsets = s->ofldqsets;
6046                 if (is_ethofld(adap))
6047                         eoqsets = s->eoqsets;
6048                 if (s->mirrorqsets)
6049                         mirrorqsets = s->mirrorqsets;
6050         }
6051
6052         if (ethqsets < s->max_ethqsets) {
6053                 s->max_ethqsets = ethqsets;
6054                 reduce_ethqs(adap, ethqsets);
6055         }
6056
6057         if (is_uld(adap)) {
6058                 s->ofldqsets = ofldqsets;
6059                 s->nqs_per_uld = s->ofldqsets;
6060         }
6061
6062         if (is_ethofld(adap))
6063                 s->eoqsets = eoqsets;
6064
6065         if (s->mirrorqsets) {
6066                 s->mirrorqsets = mirrorqsets;
6067                 for_each_port(adap, i) {
6068                         pi = adap2pinfo(adap, i);
6069                         pi->nmirrorqsets = s->mirrorqsets / nchan;
6070                         mutex_init(&pi->vi_mirror_mutex);
6071                 }
6072         }
6073
6074         /* Record the allocated MSI-X vectors in the per-adapter map. */
6075         ret = alloc_msix_info(adap, allocated);
6076         if (ret)
6077                 goto out_disable_msix;
6078
6079         for (i = 0; i < allocated; i++) {
6080                 adap->msix_info[i].vec = entries[i].vector;
6081                 adap->msix_info[i].idx = i;
6082         }
6083
6084         dev_info(adap->pdev_dev,
6085                  "%d MSI-X vectors allocated, nic %d eoqsets %d per uld %d mirrorqsets %d\n",
6086                  allocated, s->max_ethqsets, s->eoqsets, s->nqs_per_uld,
6087                  s->mirrorqsets);
6088
6089         kfree(entries);
6090         return 0;
6091
6092 out_disable_msix:
6093         pci_disable_msix(adap->pdev);
6094
6095 out_free:
6096         kfree(entries);
6097         return ret;
6098 }
6099
6100 #undef EXTRA_VECS
6101
6102 static int init_rss(struct adapter *adap)
6103 {
6104         unsigned int i;
6105         int err;
6106
6107         err = t4_init_rss_mode(adap, adap->mbox);
6108         if (err)
6109                 return err;
6110
6111         for_each_port(adap, i) {
6112                 struct port_info *pi = adap2pinfo(adap, i);
6113
6114                 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6115                 if (!pi->rss)
6116                         return -ENOMEM;
6117         }
6118         return 0;
6119 }
6120
6121 /* Dump basic information about the adapter */
6122 static void print_adapter_info(struct adapter *adapter)
6123 {
6124         /* Hardware/Firmware/etc. Version/Revision IDs */
6125         t4_dump_version_info(adapter);
6126
6127         /* Software/Hardware configuration */
6128         dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
6129                  is_offload(adapter) ? "R" : "",
6130                  ((adapter->flags & CXGB4_USING_MSIX) ? "MSI-X" :
6131                   (adapter->flags & CXGB4_USING_MSI) ? "MSI" : ""),
6132                  is_offload(adapter) ? "Offload" : "non-Offload");
6133 }
6134
6135 static void print_port_info(const struct net_device *dev)
6136 {
6137         char buf[80];
6138         char *bufp = buf;
6139         const struct port_info *pi = netdev_priv(dev);
6140         const struct adapter *adap = pi->adapter;
6141
6142         if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
6143                 bufp += sprintf(bufp, "100M/");
6144         if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
6145                 bufp += sprintf(bufp, "1G/");
6146         if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
6147                 bufp += sprintf(bufp, "10G/");
6148         if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
6149                 bufp += sprintf(bufp, "25G/");
6150         if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
6151                 bufp += sprintf(bufp, "40G/");
6152         if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
6153                 bufp += sprintf(bufp, "50G/");
6154         if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
6155                 bufp += sprintf(bufp, "100G/");
6156         if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
6157                 bufp += sprintf(bufp, "200G/");
6158         if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
6159                 bufp += sprintf(bufp, "400G/");
6160         if (bufp != buf)
6161                 --bufp;
6162         sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
6163
6164         netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
6165                     dev->name, adap->params.vpd.id, adap->name, buf);
6166 }
6167
6168 /*
6169  * Free the following resources:
6170  * - memory used for tables
6171  * - MSI/MSI-X
6172  * - net devices
6173  * - resources FW is holding for us
6174  */
6175 static void free_some_resources(struct adapter *adapter)
6176 {
6177         unsigned int i;
6178
6179         kvfree(adapter->smt);
6180         kvfree(adapter->l2t);
6181         kvfree(adapter->srq);
6182         t4_cleanup_sched(adapter);
6183         kvfree(adapter->tids.tid_tab);
6184         cxgb4_cleanup_tc_matchall(adapter);
6185         cxgb4_cleanup_tc_mqprio(adapter);
6186         cxgb4_cleanup_tc_flower(adapter);
6187         cxgb4_cleanup_tc_u32(adapter);
6188         cxgb4_cleanup_ethtool_filters(adapter);
6189         kfree(adapter->sge.egr_map);
6190         kfree(adapter->sge.ingr_map);
6191         kfree(adapter->sge.starving_fl);
6192         kfree(adapter->sge.txq_maperr);
6193 #ifdef CONFIG_DEBUG_FS
6194         kfree(adapter->sge.blocked_fl);
6195 #endif
6196         disable_msi(adapter);
6197
6198         for_each_port(adapter, i)
6199                 if (adapter->port[i]) {
6200                         struct port_info *pi = adap2pinfo(adapter, i);
6201
6202                         if (pi->viid != 0)
6203                                 t4_free_vi(adapter, adapter->mbox, adapter->pf,
6204                                            0, pi->viid);
6205                         kfree(adap2pinfo(adapter, i)->rss);
6206                         free_netdev(adapter->port[i]);
6207                 }
6208         if (adapter->flags & CXGB4_FW_OK)
6209                 t4_fw_bye(adapter, adapter->pf);
6210 }
6211
6212 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN | \
6213                    NETIF_F_GSO_UDP_L4)
6214 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
6215                    NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
6216 #define SEGMENT_SIZE 128
6217
6218 static int t4_get_chip_type(struct adapter *adap, int ver)
6219 {
6220         u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A));
6221
6222         switch (ver) {
6223         case CHELSIO_T4:
6224                 return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
6225         case CHELSIO_T5:
6226                 return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
6227         case CHELSIO_T6:
6228                 return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
6229         default:
6230                 break;
6231         }
6232         return -EINVAL;
6233 }
6234
6235 #ifdef CONFIG_PCI_IOV
6236 static void cxgb4_mgmt_setup(struct net_device *dev)
6237 {
6238         dev->type = ARPHRD_NONE;
6239         dev->mtu = 0;
6240         dev->hard_header_len = 0;
6241         dev->addr_len = 0;
6242         dev->tx_queue_len = 0;
6243         dev->flags |= IFF_NOARP;
6244         dev->priv_flags |= IFF_NO_QUEUE;
6245
6246         /* Initialize the device structure. */
6247         dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
6248         dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
6249 }
6250
6251 static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
6252 {
6253         struct adapter *adap = pci_get_drvdata(pdev);
6254         int err = 0;
6255         int current_vfs = pci_num_vf(pdev);
6256         u32 pcie_fw;
6257
6258         pcie_fw = readl(adap->regs + PCIE_FW_A);
6259         /* Check if fw is initialized */
6260         if (!(pcie_fw & PCIE_FW_INIT_F)) {
6261                 dev_warn(&pdev->dev, "Device not initialized\n");
6262                 return -EOPNOTSUPP;
6263         }
6264
6265         /* If any of the VFs is already assigned to a Guest OS, then
6266          * SR-IOV for this device cannot be modified.
6267          */
6268         if (current_vfs && pci_vfs_assigned(pdev)) {
6269                 dev_err(&pdev->dev,
6270                         "Cannot modify SR-IOV while VFs are assigned\n");
6271                 return current_vfs;
6272         }
6273         /* Note that the upper-level code ensures that we're never called with
6274          * a non-zero "num_vfs" when we already have VFs instantiated.  But
6275          * it never hurts to code defensively.
6276          */
6277         if (num_vfs != 0 && current_vfs != 0)
6278                 return -EBUSY;
6279
6280         /* Nothing to do for no change. */
6281         if (num_vfs == current_vfs)
6282                 return num_vfs;
6283
6284         /* Disable SRIOV when zero is passed. */
6285         if (!num_vfs) {
6286                 pci_disable_sriov(pdev);
6287                 /* free VF Management Interface */
6288                 unregister_netdev(adap->port[0]);
6289                 free_netdev(adap->port[0]);
6290                 adap->port[0] = NULL;
6291
6292                 /* free VF resources */
6293                 adap->num_vfs = 0;
6294                 kfree(adap->vfinfo);
6295                 adap->vfinfo = NULL;
6296                 return 0;
6297         }
6298
6299         if (!current_vfs) {
6300                 struct fw_pfvf_cmd port_cmd, port_rpl;
6301                 struct net_device *netdev;
6302                 unsigned int pmask, port;
6303                 struct pci_dev *pbridge;
6304                 struct port_info *pi;
6305                 char name[IFNAMSIZ];
6306                 u32 devcap2;
6307                 u16 flags;
6308
6309                 /* If we want to instantiate Virtual Functions, then our
6310                  * parent bridge's PCI-E needs to support Alternative Routing
6311                  * ID (ARI) because our VFs will show up at function offset 8
6312                  * and above.
6313                  */
6314                 pbridge = pdev->bus->self;
6315                 pcie_capability_read_word(pbridge, PCI_EXP_FLAGS, &flags);
6316                 pcie_capability_read_dword(pbridge, PCI_EXP_DEVCAP2, &devcap2);
6317
6318                 if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
6319                     !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
6320                         /* Our parent bridge does not support ARI so issue a
6321                          * warning and skip instantiating the VFs.  They
6322                          * won't be reachable.
6323                          */
6324                         dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
6325                                  pbridge->bus->number, PCI_SLOT(pbridge->devfn),
6326                                  PCI_FUNC(pbridge->devfn));
6327                         return -ENOTSUPP;
6328                 }
6329                 memset(&port_cmd, 0, sizeof(port_cmd));
6330                 port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
6331                                                  FW_CMD_REQUEST_F |
6332                                                  FW_CMD_READ_F |
6333                                                  FW_PFVF_CMD_PFN_V(adap->pf) |
6334                                                  FW_PFVF_CMD_VFN_V(0));
6335                 port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
6336                 err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
6337                                  &port_rpl);
6338                 if (err)
6339                         return err;
6340                 pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
6341                 port = ffs(pmask) - 1;
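                /* pmask is the firmware-provided mask of ports this PF may
                 * access; use the lowest-numbered port for the management
                 * interface.
                 */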
6342                 /* Allocate VF Management Interface. */
6343                 snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
6344                          adap->pf);
6345                 netdev = alloc_netdev(sizeof(struct port_info),
6346                                       name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
6347                 if (!netdev)
6348                         return -ENOMEM;
6349
6350                 pi = netdev_priv(netdev);
6351                 pi->adapter = adap;
6352                 pi->lport = port;
6353                 pi->tx_chan = port;
6354                 SET_NETDEV_DEV(netdev, &pdev->dev);
6355
6356                 adap->port[0] = netdev;
6357                 pi->port_id = 0;
6358
6359                 err = register_netdev(adap->port[0]);
6360                 if (err) {
6361                         pr_info("Unable to register VF mgmt netdev %s\n", name);
6362                         free_netdev(adap->port[0]);
6363                         adap->port[0] = NULL;
6364                         return err;
6365                 }
6366                 /* Allocate and set up VF Information. */
6367                 adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
6368                                        sizeof(struct vf_info), GFP_KERNEL);
6369                 if (!adap->vfinfo) {
6370                         unregister_netdev(adap->port[0]);
6371                         free_netdev(adap->port[0]);
6372                         adap->port[0] = NULL;
6373                         return -ENOMEM;
6374                 }
6375                 cxgb4_mgmt_fill_vf_station_mac_addr(adap);
6376         }
6377         /* Instantiate the requested number of VFs. */
6378         err = pci_enable_sriov(pdev, num_vfs);
6379         if (err) {
6380                 pr_info("Unable to instantiate %d VFs\n", num_vfs);
6381                 if (!current_vfs) {
6382                         unregister_netdev(adap->port[0]);
6383                         free_netdev(adap->port[0]);
6384                         adap->port[0] = NULL;
6385                         kfree(adap->vfinfo);
6386                         adap->vfinfo = NULL;
6387                 }
6388                 return err;
6389         }
6390
6391         adap->num_vfs = num_vfs;
6392         return num_vfs;
6393 }
6394 #endif /* CONFIG_PCI_IOV */
6395
6396 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) || IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6397
6398 static int chcr_offload_state(struct adapter *adap,
6399                               enum cxgb4_netdev_tls_ops op_val)
6400 {
6401         switch (op_val) {
6402 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
6403         case CXGB4_TLSDEV_OPS:
6404                 if (!adap->uld[CXGB4_ULD_KTLS].handle) {
6405                         dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n");
6406                         return -EOPNOTSUPP;
6407                 }
6408                 if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) {
6409                         dev_dbg(adap->pdev_dev,
6410                                 "ch_ktls driver has no registered tlsdev_ops\n");
6411                         return -EOPNOTSUPP;
6412                 }
6413                 break;
6414 #endif /* CONFIG_CHELSIO_TLS_DEVICE */
6415 #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6416         case CXGB4_XFRMDEV_OPS:
6417                 if (!adap->uld[CXGB4_ULD_IPSEC].handle) {
6418                         dev_dbg(adap->pdev_dev, "chipsec driver is not loaded\n");
6419                         return -EOPNOTSUPP;
6420                 }
6421                 if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) {
6422                         dev_dbg(adap->pdev_dev,
6423                                 "chipsec driver has no registered xfrmdev_ops\n");
6424                         return -EOPNOTSUPP;
6425                 }
6426                 break;
6427 #endif /* CONFIG_CHELSIO_IPSEC_INLINE */
6428         default:
6429                 dev_dbg(adap->pdev_dev,
6430                         "driver has no support for offload %d\n", op_val);
6431                 return -EOPNOTSUPP;
6432         }
6433
6434         return 0;
6435 }
6436
6437 #endif /* CONFIG_CHELSIO_TLS_DEVICE || CONFIG_CHELSIO_IPSEC_INLINE */
6438
6439 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
6440
6441 static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
6442                               enum tls_offload_ctx_dir direction,
6443                               struct tls_crypto_info *crypto_info,
6444                               u32 tcp_sn)
6445 {
6446         struct adapter *adap = netdev2adap(netdev);
6447         int ret;
6448
6449         mutex_lock(&uld_mutex);
6450         ret = chcr_offload_state(adap, CXGB4_TLSDEV_OPS);
6451         if (ret)
6452                 goto out_unlock;
6453
6454         ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
6455         if (ret)
6456                 goto out_unlock;
6457
6458         ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk,
6459                                                                 direction,
6460                                                                 crypto_info,
6461                                                                 tcp_sn);
6462         /* if there is a failure, clear the refcount */
6463         if (ret)
6464                 cxgb4_set_ktls_feature(adap,
6465                                        FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
6466 out_unlock:
6467         mutex_unlock(&uld_mutex);
6468         return ret;
6469 }
6470
6471 static void cxgb4_ktls_dev_del(struct net_device *netdev,
6472                                struct tls_context *tls_ctx,
6473                                enum tls_offload_ctx_dir direction)
6474 {
6475         struct adapter *adap = netdev2adap(netdev);
6476
6477         mutex_lock(&uld_mutex);
6478         if (chcr_offload_state(adap, CXGB4_TLSDEV_OPS))
6479                 goto out_unlock;
6480
6481         adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
6482                                                           direction);
6483
6484 out_unlock:
6485         cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
6486         mutex_unlock(&uld_mutex);
6487 }
6488
6489 static const struct tlsdev_ops cxgb4_ktls_ops = {
6490         .tls_dev_add = cxgb4_ktls_dev_add,
6491         .tls_dev_del = cxgb4_ktls_dev_del,
6492 };
6493 #endif /* CONFIG_CHELSIO_TLS_DEVICE */
6494
6495 #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6496
6497 static int cxgb4_xfrm_add_state(struct xfrm_state *x)
6498 {
6499         struct adapter *adap = netdev2adap(x->xso.dev);
6500         int ret;
6501
6502         if (!mutex_trylock(&uld_mutex)) {
6503                 dev_dbg(adap->pdev_dev,
6504                         "crypto uld critical resource is under use\n");
6505                 return -EBUSY;
6506         }
6507         ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS);
6508         if (ret)
6509                 goto out_unlock;
6510
6511         ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x);
6512
6513 out_unlock:
6514         mutex_unlock(&uld_mutex);
6515
6516         return ret;
6517 }
6518
6519 static void cxgb4_xfrm_del_state(struct xfrm_state *x)
6520 {
6521         struct adapter *adap = netdev2adap(x->xso.dev);
6522
6523         if (!mutex_trylock(&uld_mutex)) {
6524                 dev_dbg(adap->pdev_dev,
6525                         "crypto uld critical resource is under use\n");
6526                 return;
6527         }
6528         if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6529                 goto out_unlock;
6530
6531         adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);
6532
6533 out_unlock:
6534         mutex_unlock(&uld_mutex);
6535 }
6536
6537 static void cxgb4_xfrm_free_state(struct xfrm_state *x)
6538 {
6539         struct adapter *adap = netdev2adap(x->xso.dev);
6540
6541         if (!mutex_trylock(&uld_mutex)) {
6542                 dev_dbg(adap->pdev_dev,
6543                         "crypto uld critical resource is under use\n");
6544                 return;
6545         }
6546         if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6547                 goto out_unlock;
6548
6549         adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);
6550
6551 out_unlock:
6552         mutex_unlock(&uld_mutex);
6553 }
6554
6555 static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
6556 {
6557         struct adapter *adap = netdev2adap(x->xso.dev);
6558         bool ret = false;
6559
6560         if (!mutex_trylock(&uld_mutex)) {
6561                 dev_dbg(adap->pdev_dev,
6562                         "crypto uld critical resource is under use\n");
6563                 return ret;
6564         }
6565         if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6566                 goto out_unlock;
6567
6568         ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
6569
6570 out_unlock:
6571         mutex_unlock(&uld_mutex);
6572         return ret;
6573 }
6574
6575 static void cxgb4_advance_esn_state(struct xfrm_state *x)
6576 {
6577         struct adapter *adap = netdev2adap(x->xso.dev);
6578
6579         if (!mutex_trylock(&uld_mutex)) {
6580                 dev_dbg(adap->pdev_dev,
6581                         "crypto uld critical resource is under use\n");
6582                 return;
6583         }
6584         if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
6585                 goto out_unlock;
6586
6587         adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x);
6588
6589 out_unlock:
6590         mutex_unlock(&uld_mutex);
6591 }
6592
6593 static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
6594         .xdo_dev_state_add      = cxgb4_xfrm_add_state,
6595         .xdo_dev_state_delete   = cxgb4_xfrm_del_state,
6596         .xdo_dev_state_free     = cxgb4_xfrm_free_state,
6597         .xdo_dev_offload_ok     = cxgb4_ipsec_offload_ok,
6598         .xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
6599 };
6600
6601 #endif /* CONFIG_CHELSIO_IPSEC_INLINE */
6602
6603 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6604 {
6605         struct net_device *netdev;
6606         struct adapter *adapter;
6607         static int adap_idx = 1;
6608         int s_qpp, qpp, num_seg;
6609         struct port_info *pi;
6610         bool highdma = false;
6611         enum chip_type chip;
6612         void __iomem *regs;
6613         int func, chip_ver;
6614         u16 device_id;
6615         int i, err;
6616         u32 whoami;
6617
6618         err = pci_request_regions(pdev, KBUILD_MODNAME);
6619         if (err) {
6620                 /* Just info: some other driver may have claimed the device. */
6621                 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6622                 return err;
6623         }
6624
6625         err = pci_enable_device(pdev);
6626         if (err) {
6627                 dev_err(&pdev->dev, "cannot enable PCI device\n");
6628                 goto out_release_regions;
6629         }
6630
6631         regs = pci_ioremap_bar(pdev, 0);
6632         if (!regs) {
6633                 dev_err(&pdev->dev, "cannot map device registers\n");
6634                 err = -ENOMEM;
6635                 goto out_disable_device;
6636         }
6637
6638         adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6639         if (!adapter) {
6640                 err = -ENOMEM;
6641                 goto out_unmap_bar0;
6642         }
6643
6644         adapter->regs = regs;
6645         err = t4_wait_dev_ready(regs);
6646         if (err < 0)
6647                 goto out_free_adapter;
6648
6649         /* We control everything through one PF */
6650         whoami = t4_read_reg(adapter, PL_WHOAMI_A);
6651         pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
6652         chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
6653         if ((int)chip < 0) {
6654                 dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
6655                 err = chip;
6656                 goto out_free_adapter;
6657         }
6658         chip_ver = CHELSIO_CHIP_VERSION(chip);
6659         func = chip_ver <= CHELSIO_T5 ?
6660                SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
6661
6662         adapter->pdev = pdev;
6663         adapter->pdev_dev = &pdev->dev;
6664         adapter->name = pci_name(pdev);
6665         adapter->mbox = func;
6666         adapter->pf = func;
6667         adapter->params.chip = chip;
6668         adapter->adap_idx = adap_idx;
6669         adapter->msg_enable = DFLT_MSG_ENABLE;
6670         adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
6671                                     (sizeof(struct mbox_cmd) *
6672                                      T4_OS_LOG_MBOX_CMDS),
6673                                     GFP_KERNEL);
6674         if (!adapter->mbox_log) {
6675                 err = -ENOMEM;
6676                 goto out_free_adapter;
6677         }
6678         spin_lock_init(&adapter->mbox_lock);
6679         INIT_LIST_HEAD(&adapter->mlist.list);
6680         adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
6681         pci_set_drvdata(pdev, adapter);
6682
6683         if (func != ent->driver_data) {
6684                 pci_disable_device(pdev);
6685                 pci_save_state(pdev);        /* to restore SR-IOV later */
6686                 return 0;
6687         }
6688
6689         if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
6690                 highdma = true;
6691                 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6692                 if (err) {
6693                         dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
6694                                 "coherent allocations\n");
6695                         goto out_free_adapter;
6696                 }
6697         } else {
6698                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6699                 if (err) {
6700                         dev_err(&pdev->dev, "no usable DMA configuration\n");
6701                         goto out_free_adapter;
6702                 }
6703         }
6704
6705         pci_enable_pcie_error_reporting(pdev);
6706         pci_set_master(pdev);
6707         pci_save_state(pdev);
6708         adap_idx++;
6709         adapter->workq = create_singlethread_workqueue("cxgb4");
6710         if (!adapter->workq) {
6711                 err = -ENOMEM;
6712                 goto out_free_adapter;
6713         }
6714
6715         /* PCI device has been enabled */
6716         adapter->flags |= CXGB4_DEV_ENABLED;
6717         memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6718
6719         /* If possible, we use the PCIe Relaxed Ordering Attribute to
6720          * deliver Ingress Packet Data to Free List Buffers in order to
6721          * allow for chipset performance optimizations between the Root
6722          * Complex and Memory Controllers.  (Messages to the associated
6723          * Ingress Queue notifying new Packet Placement in the Free List
6724          * Buffers will be sent without the Relaxed Ordering Attribute,
6725          * thus guaranteeing that all preceding PCIe Transaction Layer
6726          * Packets will be processed first.)  But some Root Complexes
6727          * have issues with Upstream Transaction Layer Packets that have
6728          * the Relaxed Ordering Attribute set; the kernel clears the
6729          * Relaxed Ordering bit for PCIe devices under such Root
6730          * Complexes, so we check our own PCIe configuration space to see
6731          * if it's flagged with advice against using Relaxed Ordering.
6732          */
6733         if (!pcie_relaxed_ordering_enabled(pdev))
6734                 adapter->flags |= CXGB4_ROOT_NO_RELAXED_ORDERING;
6735
6736         spin_lock_init(&adapter->stats_lock);
6737         spin_lock_init(&adapter->tid_release_lock);
6738         spin_lock_init(&adapter->win0_lock);
6739
6740         INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6741         INIT_WORK(&adapter->db_full_task, process_db_full);
6742         INIT_WORK(&adapter->db_drop_task, process_db_drop);
6743         INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);
6744
6745         err = t4_prep_adapter(adapter);
6746         if (err)
6747                 goto out_free_adapter;
6748
6749         if (is_kdump_kernel()) {
6750                 /* Collect hardware state and append to /proc/vmcore */
6751                 err = cxgb4_cudbg_vmcore_add_dump(adapter);
6752                 if (err) {
6753                         dev_warn(adapter->pdev_dev,
6754                                  "Failed to collect vmcore device dump, err: %d. Continuing\n",
6755                                  err);
6756                         err = 0;
6757                 }
6758         }
6759
6760         if (!is_t4(adapter->params.chip)) {
6761                 s_qpp = (QUEUESPERPAGEPF0_S +
6762                         (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
6763                         adapter->pf);
6764                 qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
6765                       SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
6766                 num_seg = PAGE_SIZE / SEGMENT_SIZE;
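                /* e.g. with a typical 4 KiB PAGE_SIZE, num_seg =
                 * 4096 / 128 = 32 write-combining segments per page.
                 */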
6767
6768                 /* Each segment is 128 bytes long.  Write coalescing is
6769                  * enabled only when the SGE_EGRESS_QUEUES_PER_PAGE_PF
6770                  * register value for the queue is less than the number of
6771                  * segments that fit in a page.
6772                  */
6773                 if (qpp > num_seg) {
6774                         dev_err(&pdev->dev,
6775                                 "Incorrect number of egress queues per page\n");
6776                         err = -EINVAL;
6777                         goto out_free_adapter;
6778                 }
6779                 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6780                                            pci_resource_len(pdev, 2));
6781                 if (!adapter->bar2) {
6782                         dev_err(&pdev->dev, "cannot map device bar2 region\n");
6783                         err = -ENOMEM;
6784                         goto out_free_adapter;
6785                 }
6786         }
6787
6788         setup_memwin(adapter);
6789         err = adap_init0(adapter, 0);
6790 #ifdef CONFIG_DEBUG_FS
6791         bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
6792 #endif
6793         setup_memwin_rdma(adapter);
6794         if (err)
6795                 goto out_unmap_bar;
6796
6797         /* configure SGE_STAT_CFG_A to read WC stats */
6798         if (!is_t4(adapter->params.chip))
6799                 t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
6800                              (is_t5(adapter->params.chip) ? STATMODE_V(0) :
6801                               T6_STATMODE_V(0)));
6802
6803         /* Initialize hash mac addr list */
6804         INIT_LIST_HEAD(&adapter->mac_hlist);
6805
6806         for_each_port(adapter, i) {
6807                 /* To support MQPRIO offload we need some extra queues
6808                  * for the ETHOFLD TIDs.  Keep the count equal to
6809                  * MAX_ATIDS for now.  Once we connect to the firmware
6810                  * later and query the EOTID parameters, we'll learn
6811                  * the actual number of EOTIDs supported.
6812                  */
6813                 netdev = alloc_etherdev_mq(sizeof(struct port_info),
6814                                            MAX_ETH_QSETS + MAX_ATIDS);
6815                 if (!netdev) {
6816                         err = -ENOMEM;
6817                         goto out_free_dev;
6818                 }
6819
6820                 SET_NETDEV_DEV(netdev, &pdev->dev);
6821
6822                 adapter->port[i] = netdev;
6823                 pi = netdev_priv(netdev);
6824                 pi->adapter = adapter;
6825                 pi->xact_addr_filt = -1;
6826                 pi->port_id = i;
6827                 netdev->irq = pdev->irq;
6828
6829                 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6830                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6831                         NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_GRO |
6832                         NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
6833                         NETIF_F_HW_TC | NETIF_F_NTUPLE;
6834
6835                 if (chip_ver > CHELSIO_T5) {
6836                         netdev->hw_enc_features |= NETIF_F_IP_CSUM |
6837                                                    NETIF_F_IPV6_CSUM |
6838                                                    NETIF_F_RXCSUM |
6839                                                    NETIF_F_GSO_UDP_TUNNEL |
6840                                                    NETIF_F_GSO_UDP_TUNNEL_CSUM |
6841                                                    NETIF_F_TSO | NETIF_F_TSO6;
6842
6843                         netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
6844                                                NETIF_F_GSO_UDP_TUNNEL_CSUM |
6845                                                NETIF_F_HW_TLS_RECORD;
6846
6847                         if (adapter->rawf_cnt)
6848                                 netdev->udp_tunnel_nic_info = &cxgb_udp_tunnels;
6849                 }
6850
6851                 if (highdma)
6852                         netdev->hw_features |= NETIF_F_HIGHDMA;
6853                 netdev->features |= netdev->hw_features;
6854                 netdev->vlan_features = netdev->features & VLAN_FEAT;
6855 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
6856                 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
6857                         netdev->hw_features |= NETIF_F_HW_TLS_TX;
6858                         netdev->tlsdev_ops = &cxgb4_ktls_ops;
6859                         /* initialize the refcount */
6860                         refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
6861                 }
6862 #endif /* CONFIG_CHELSIO_TLS_DEVICE */
6863 #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
6864                 if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) {
6865                         netdev->hw_enc_features |= NETIF_F_HW_ESP;
6866                         netdev->features |= NETIF_F_HW_ESP;
6867                         netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops;
6868                 }
6869 #endif /* CONFIG_CHELSIO_IPSEC_INLINE */
6870
6871                 netdev->priv_flags |= IFF_UNICAST_FLT;
6872
6873                 /* MTU range: 81 - 9600 */
6874                 netdev->min_mtu = 81;              /* accommodate SACK */
6875                 netdev->max_mtu = MAX_MTU;
6876
6877                 netdev->netdev_ops = &cxgb4_netdev_ops;
6878 #ifdef CONFIG_CHELSIO_T4_DCB
6879                 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6880                 cxgb4_dcb_state_init(netdev);
6881                 cxgb4_dcb_version_init(netdev);
6882 #endif
6883                 cxgb4_set_ethtool_ops(netdev);
6884         }
6885
6886         cxgb4_init_ethtool_dump(adapter);
6887
6888         pci_set_drvdata(pdev, adapter);
6889
6890         if (adapter->flags & CXGB4_FW_OK) {
6891                 err = t4_port_init(adapter, func, func, 0);
6892                 if (err)
6893                         goto out_free_dev;
6894         } else if (adapter->params.nports == 1) {
6895                 /* If we don't have a connection to the firmware -- possibly
6896                  * because of an error -- grab the raw VPD parameters so we
6897                  * can set the proper MAC Address on the debug network
6898                  * interface that we've created.
6899                  */
6900                 u8 hw_addr[ETH_ALEN];
6901                 u8 *na = adapter->params.vpd.na;
6902
6903                 err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
6904                 if (!err) {
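                        /* The VPD Network Address ("na") is stored as 12
                         * hex characters; fold each pair of digits into
                         * one byte of the MAC address.
                         */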
6905                         for (i = 0; i < ETH_ALEN; i++)
6906                                 hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
6907                                               hex2val(na[2 * i + 1]));
6908                         t4_set_hw_addr(adapter, 0, hw_addr);
6909                 }
6910         }
6911
6912         if (!(adapter->flags & CXGB4_FW_OK))
6913                 goto fw_attach_fail;
6914
6915         /* Configure queues and allocate tables now; they may be needed as
6916          * soon as the first register_netdev completes.
6917          */
6918         err = cfg_queues(adapter);
6919         if (err)
6920                 goto out_free_dev;
6921
6922         adapter->smt = t4_init_smt();
6923         if (!adapter->smt) {
6924                 /* We tolerate a lack of SMT, giving up some functionality */
6925                 dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
6926         }
6927
6928         adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
6929         if (!adapter->l2t) {
6930                 /* We tolerate a lack of L2T, giving up some functionality */
6931                 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6932                 adapter->params.offload = 0;
6933         }
6934
6935 #if IS_ENABLED(CONFIG_IPV6)
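        /* IPv6 offloads rely on the Lookup Engine's compressed local IP
         * (CLIP) support.  On T4/T5 this is optional and advertised via
         * ASLIPCOMPEN; the check is skipped on T6 and later.
         */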
6936         if (chip_ver <= CHELSIO_T5 &&
6937             (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
6938                 /* CLIP functionality is not present in hardware,
6939                  * hence disable all offload features
6940                  */
6941                 dev_warn(&pdev->dev,
6942                          "CLIP not enabled in hardware, continuing\n");
6943                 adapter->params.offload = 0;
6944         } else {
6945                 adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
6946                                                   adapter->clipt_end);
6947                 if (!adapter->clipt) {
6948                         /* We tolerate a lack of clip_table, giving up
6949                          * some functionality
6950                          */
6951                         dev_warn(&pdev->dev,
6952                                  "could not allocate Clip table, continuing\n");
6953                         adapter->params.offload = 0;
6954                 }
6955         }
6956 #endif
6957
6958         for_each_port(adapter, i) {
6959                 pi = adap2pinfo(adapter, i);
6960                 pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
6961                 if (!pi->sched_tbl)
6962                         dev_warn(&pdev->dev,
6963                                  "could not activate scheduling on port %d\n",
6964                                  i);
6965         }
6966
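        /* Size the hash-filter TID region from the Lookup Engine config:
         * T4/T5 encode the region size as a log2 TID count and scale the
         * hash base register down by 4, while T6 reports the size in
         * 8-entry units and the base TID directly.
         */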
6967         if (is_offload(adapter) || is_hashfilter(adapter)) {
6968                 if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
6969                         u32 v;
6970
6971                         v = t4_read_reg(adapter, LE_DB_HASH_CONFIG_A);
6972                         if (chip_ver <= CHELSIO_T5) {
6973                                 adapter->tids.nhash = 1 << HASHTIDSIZE_G(v);
6974                                 v = t4_read_reg(adapter, LE_DB_TID_HASHBASE_A);
6975                                 adapter->tids.hash_base = v / 4;
6976                         } else {
6977                                 adapter->tids.nhash = HASHTBLSIZE_G(v) << 3;
6978                                 v = t4_read_reg(adapter,
6979                                                 T6_LE_DB_HASH_TID_BASE_A);
6980                                 adapter->tids.hash_base = v;
6981                         }
6982                 }
6983         }
6984
6985         if (tid_init(&adapter->tids) < 0) {
6986         dev_warn(&pdev->dev,
6987                  "could not allocate TID table, continuing\n");
6988                 adapter->params.offload = 0;
6989         } else {
6990                 adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
6991                 if (!adapter->tc_u32)
6992                         dev_warn(&pdev->dev,
6993                                  "could not offload tc u32, continuing\n");
6994
6995                 if (cxgb4_init_tc_flower(adapter))
6996                         dev_warn(&pdev->dev,
6997                                  "could not offload tc flower, continuing\n");
6998
6999                 if (cxgb4_init_tc_mqprio(adapter))
7000                         dev_warn(&pdev->dev,
7001                                  "could not offload tc mqprio, continuing\n");
7002
7003                 if (cxgb4_init_tc_matchall(adapter))
7004                         dev_warn(&pdev->dev,
7005                                  "could not offload tc matchall, continuing\n");
7006                 if (cxgb4_init_ethtool_filters(adapter))
7007                         dev_warn(&pdev->dev,
7008                                  "could not initialize ethtool filters, continuing\n");
7009         }
7010
7011         /* See what interrupts we'll be using */
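        /* The "msi" module parameter picks the mechanism: 2 tries MSI-X
         * first, 1 restricts us to MSI, 0 leaves legacy INTx; each step
         * falls back to the next one down if allocation fails.
         */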
7012         if (msi > 1 && enable_msix(adapter) == 0)
7013                 adapter->flags |= CXGB4_USING_MSIX;
7014         else if (msi > 0 && pci_enable_msi(pdev) == 0) {
7015                 adapter->flags |= CXGB4_USING_MSI;
7016                 if (msi > 1)
7017                         free_msix_info(adapter);
7018         }
7019
7020         /* Check for PCI Express bandwidth capabilities */
7021         pcie_print_link_status(pdev);
7022
7023         cxgb4_init_mps_ref_entries(adapter);
7024
7025         err = init_rss(adapter);
7026         if (err)
7027                 goto out_free_dev;
7028
7029         err = setup_non_data_intr(adapter);
7030         if (err) {
7031                 dev_err(adapter->pdev_dev,
7032                         "Non Data interrupt allocation failed, err: %d\n", err);
7033                 goto out_free_dev;
7034         }
7035
7036         err = setup_fw_sge_queues(adapter);
7037         if (err) {
7038                 dev_err(adapter->pdev_dev,
7039                         "FW sge queue allocation failed, err %d", err);
7040                 goto out_free_dev;
7041         }
7042
7043 fw_attach_fail:
7044         /*
7045          * The card is now ready to go.  If any errors occur during device
7046          * registration, we do not fail the whole card but rather proceed only
7047          * with the ports we manage to register successfully.  However, we must
7048          * register at least one net device.
7049          */
7050         for_each_port(adapter, i) {
7051                 pi = adap2pinfo(adapter, i);
7052                 adapter->port[i]->dev_port = pi->lport;
7053                 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
7054                 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
7055
7056                 netif_carrier_off(adapter->port[i]);
7057
7058                 err = register_netdev(adapter->port[i]);
7059                 if (err)
7060                         break;
7061                 adapter->chan_map[pi->tx_chan] = i;
7062                 print_port_info(adapter->port[i]);
7063         }
7064         if (i == 0) {
7065                 dev_err(&pdev->dev, "could not register any net devices\n");
7066                 goto out_free_dev;
7067         }
7068         if (err) {
7069                 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
7070                 err = 0;
7071         }
7072
7073         if (cxgb4_debugfs_root) {
7074                 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
7075                                                            cxgb4_debugfs_root);
7076                 setup_debugfs(adapter);
7077         }
7078
7079         /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
7080         pdev->needs_freset = 1;
7081
7082         if (is_uld(adapter))
7083                 cxgb4_uld_enable(adapter);
7084
7085         if (!is_t4(adapter->params.chip))
7086                 cxgb4_ptp_init(adapter);
7087
7088         if (IS_REACHABLE(CONFIG_THERMAL) &&
7089             !is_t4(adapter->params.chip) && (adapter->flags & CXGB4_FW_OK))
7090                 cxgb4_thermal_init(adapter);
7091
7092         print_adapter_info(adapter);
7093         return 0;
7094
7095  out_free_dev:
7096         t4_free_sge_resources(adapter);
7097         free_some_resources(adapter);
7098         if (adapter->flags & CXGB4_USING_MSIX)
7099                 free_msix_info(adapter);
7100         if (adapter->num_uld || adapter->num_ofld_uld)
7101                 t4_uld_mem_free(adapter);
7102  out_unmap_bar:
7103         if (!is_t4(adapter->params.chip))
7104                 iounmap(adapter->bar2);
7105  out_free_adapter:
7106         if (adapter->workq)
7107                 destroy_workqueue(adapter->workq);
7108
7109         kfree(adapter->mbox_log);
7110         kfree(adapter);
7111  out_unmap_bar0:
7112         iounmap(regs);
7113  out_disable_device:
7114         pci_disable_pcie_error_reporting(pdev);
7115         pci_disable_device(pdev);
7116  out_release_regions:
7117         pci_release_regions(pdev);
7118         return err;
7119 }
7120
7121 static void remove_one(struct pci_dev *pdev)
7122 {
7123         struct adapter *adapter = pci_get_drvdata(pdev);
7124         struct hash_mac_addr *entry, *tmp;
7125
7126         if (!adapter) {
7127                 pci_release_regions(pdev);
7128                 return;
7129         }
7130
7131         /* If we allocated filters, free up state associated with any
7132          * valid filters ...
7133          */
7134         clear_all_filters(adapter);
7135
7136         adapter->flags |= CXGB4_SHUTTING_DOWN;
7137
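        /* PF4 is the Physical Function the full driver attaches to; it alone
         * owns the net devices, queues and ULD state torn down below.  The
         * other PFs exist for SR-IOV provisioning and only need their VFs
         * disabled.
         */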
7138         if (adapter->pf == 4) {
7139                 int i;
7140
7141                 /* Tear down per-adapter Work Queue first since it can contain
7142                  * references to our adapter data structure.
7143                  */
7144                 destroy_workqueue(adapter->workq);
7145
7146                 if (is_uld(adapter)) {
7147                         detach_ulds(adapter);
7148                         t4_uld_clean_up(adapter);
7149                 }
7150
7151                 adap_free_hma_mem(adapter);
7152
7153                 disable_interrupts(adapter);
7154
7155                 cxgb4_free_mps_ref_entries(adapter);
7156
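                /* init_one() tolerates partial registration, so only the
                 * ports that actually reached NETREG_REGISTERED are
                 * unregistered here.
                 */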
7157                 for_each_port(adapter, i)
7158                         if (adapter->port[i]->reg_state == NETREG_REGISTERED)
7159                                 unregister_netdev(adapter->port[i]);
7160
7161                 debugfs_remove_recursive(adapter->debugfs_root);
7162
7163                 if (!is_t4(adapter->params.chip))
7164                         cxgb4_ptp_stop(adapter);
7165                 if (IS_REACHABLE(CONFIG_THERMAL))
7166                         cxgb4_thermal_remove(adapter);
7167
7168                 if (adapter->flags & CXGB4_FULL_INIT_DONE)
7169                         cxgb_down(adapter);
7170
7171                 if (adapter->flags & CXGB4_USING_MSIX)
7172                         free_msix_info(adapter);
7173                 if (adapter->num_uld || adapter->num_ofld_uld)
7174                         t4_uld_mem_free(adapter);
7175                 free_some_resources(adapter);
7176                 list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
7177                                          list) {
7178                         list_del(&entry->list);
7179                         kfree(entry);
7180                 }
7181
7182 #if IS_ENABLED(CONFIG_IPV6)
7183                 t4_cleanup_clip_tbl(adapter);
7184 #endif
7185                 if (!is_t4(adapter->params.chip))
7186                         iounmap(adapter->bar2);
7187         }
7188 #ifdef CONFIG_PCI_IOV
7189         else {
7190                 cxgb4_iov_configure(adapter->pdev, 0);
7191         }
7192 #endif
7193         iounmap(adapter->regs);
7194         pci_disable_pcie_error_reporting(pdev);
7195         if ((adapter->flags & CXGB4_DEV_ENABLED)) {
7196                 pci_disable_device(pdev);
7197                 adapter->flags &= ~CXGB4_DEV_ENABLED;
7198         }
7199         pci_release_regions(pdev);
7200         kfree(adapter->mbox_log);
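        /* Let any outstanding RCU read-side references to the adapter
         * drain before it is freed.
         */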
7201         synchronize_rcu();
7202         kfree(adapter);
7203 }
7204
7205 /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
7206  * delivery.  This is essentially a stripped down version of the PCI remove()
7207  * function where we do the minimal amount of work necessary to shut down
7208  * any further activity.
7209  */
7210 static void shutdown_one(struct pci_dev *pdev)
7211 {
7212         struct adapter *adapter = pci_get_drvdata(pdev);
7213
7214         /* As with remove_one() above, we only want to do cleanup on PCI
7215          * devices which went all the way through init_one()
7216          * ...
7217          */
7218         if (!adapter) {
7219                 pci_release_regions(pdev);
7220                 return;
7221         }
7222
7223         adapter->flags |= CXGB4_SHUTTING_DOWN;
7224
7225         if (adapter->pf == 4) {
7226                 int i;
7227
7228                 for_each_port(adapter, i)
7229                         if (adapter->port[i]->reg_state == NETREG_REGISTERED)
7230                                 cxgb_close(adapter->port[i]);
7231
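                /* Stopping the mqprio offload tears down the netdevs' TC
                 * state, so it must run under the RTNL lock.
                 */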
7232                 rtnl_lock();
7233                 cxgb4_mqprio_stop_offload(adapter);
7234                 rtnl_unlock();
7235
7236                 if (is_uld(adapter)) {
7237                         detach_ulds(adapter);
7238                         t4_uld_clean_up(adapter);
7239                 }
7240
7241                 disable_interrupts(adapter);
7242                 disable_msi(adapter);
7243
7244                 t4_sge_stop(adapter);
7245                 if (adapter->flags & CXGB4_FW_OK)
7246                         t4_fw_bye(adapter, adapter->mbox);
7247         }
7248 }
7249
7250 static struct pci_driver cxgb4_driver = {
7251         .name     = KBUILD_MODNAME,
7252         .id_table = cxgb4_pci_tbl,
7253         .probe    = init_one,
7254         .remove   = remove_one,
7255         .shutdown = shutdown_one,
7256 #ifdef CONFIG_PCI_IOV
7257         .sriov_configure = cxgb4_iov_configure,
7258 #endif
7259         .err_handler = &cxgb4_eeh,
7260 };
7261
7262 static int __init cxgb4_init_module(void)
7263 {
7264         int ret;
7265
7266         cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
7267
7268         ret = pci_register_driver(&cxgb4_driver);
7269         if (ret < 0)
7270                 goto err_pci;
7271
7272 #if IS_ENABLED(CONFIG_IPV6)
7273         if (!inet6addr_registered) {
7274                 ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
7275                 if (ret)
7276                         pci_unregister_driver(&cxgb4_driver);
7277                 else
7278                         inet6addr_registered = true;
7279         }
7280 #endif
7281
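        /* ret is non-zero here only if the inet6addr notifier registration
         * above failed, in which case the PCI driver has already been
         * unregistered and only the debugfs directory remains to clean up.
         */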
7282         if (ret == 0)
7283                 return ret;
7284
7285 err_pci:
7286         debugfs_remove(cxgb4_debugfs_root);
7287
7288         return ret;
7289 }
7290
7291 static void __exit cxgb4_cleanup_module(void)
7292 {
7293 #if IS_ENABLED(CONFIG_IPV6)
7294         if (inet6addr_registered) {
7295                 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
7296                 inet6addr_registered = false;
7297         }
7298 #endif
7299         pci_unregister_driver(&cxgb4_driver);
7300         debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
7301 }
7302
7303 module_init(cxgb4_init_module);
7304 module_exit(cxgb4_cleanup_module);