drivers/net/ethernet/broadcom/bnxt/bnxt.c
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2014-2016 Broadcom Corporation
4  * Copyright (c) 2016-2019 Broadcom Limited
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation.
9  */
10
11 #include <linux/module.h>
12
13 #include <linux/stringify.h>
14 #include <linux/kernel.h>
15 #include <linux/timer.h>
16 #include <linux/errno.h>
17 #include <linux/ioport.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/interrupt.h>
21 #include <linux/pci.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/skbuff.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/bitops.h>
27 #include <linux/io.h>
28 #include <linux/irq.h>
29 #include <linux/delay.h>
30 #include <asm/byteorder.h>
31 #include <asm/page.h>
32 #include <linux/time.h>
33 #include <linux/mii.h>
34 #include <linux/mdio.h>
35 #include <linux/if.h>
36 #include <linux/if_vlan.h>
37 #include <linux/if_bridge.h>
38 #include <linux/rtc.h>
39 #include <linux/bpf.h>
40 #include <net/ip.h>
41 #include <net/tcp.h>
42 #include <net/udp.h>
43 #include <net/checksum.h>
44 #include <net/ip6_checksum.h>
45 #include <net/udp_tunnel.h>
46 #include <linux/workqueue.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/log2.h>
50 #include <linux/aer.h>
51 #include <linux/bitmap.h>
52 #include <linux/cpu_rmap.h>
53 #include <linux/cpumask.h>
54 #include <net/pkt_cls.h>
55 #include <linux/hwmon.h>
56 #include <linux/hwmon-sysfs.h>
57 #include <net/page_pool.h>
58
59 #include "bnxt_hsi.h"
60 #include "bnxt.h"
61 #include "bnxt_ulp.h"
62 #include "bnxt_sriov.h"
63 #include "bnxt_ethtool.h"
64 #include "bnxt_dcb.h"
65 #include "bnxt_xdp.h"
66 #include "bnxt_vfr.h"
67 #include "bnxt_tc.h"
68 #include "bnxt_devlink.h"
69 #include "bnxt_debugfs.h"
70
71 #define BNXT_TX_TIMEOUT         (5 * HZ)
72
73 static const char version[] =
74         "Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
75
76 MODULE_LICENSE("GPL");
77 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
78 MODULE_VERSION(DRV_MODULE_VERSION);
79
80 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
81 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
82 #define BNXT_RX_COPY_THRESH 256
83
84 #define BNXT_TX_PUSH_THRESH 164
85
86 enum board_idx {
87         BCM57301,
88         BCM57302,
89         BCM57304,
90         BCM57417_NPAR,
91         BCM58700,
92         BCM57311,
93         BCM57312,
94         BCM57402,
95         BCM57404,
96         BCM57406,
97         BCM57402_NPAR,
98         BCM57407,
99         BCM57412,
100         BCM57414,
101         BCM57416,
102         BCM57417,
103         BCM57412_NPAR,
104         BCM57314,
105         BCM57417_SFP,
106         BCM57416_SFP,
107         BCM57404_NPAR,
108         BCM57406_NPAR,
109         BCM57407_SFP,
110         BCM57407_NPAR,
111         BCM57414_NPAR,
112         BCM57416_NPAR,
113         BCM57452,
114         BCM57454,
115         BCM5745x_NPAR,
116         BCM57508,
117         BCM57504,
118         BCM57502,
119         BCM57508_NPAR,
120         BCM57504_NPAR,
121         BCM57502_NPAR,
122         BCM58802,
123         BCM58804,
124         BCM58808,
125         NETXTREME_E_VF,
126         NETXTREME_C_VF,
127         NETXTREME_S_VF,
128         NETXTREME_E_P5_VF,
129 };
130
131 /* indexed by enum above */
132 static const struct {
133         char *name;
134 } board_info[] = {
135         [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
136         [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
137         [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
138         [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
139         [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
140         [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
141         [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
142         [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
143         [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
144         [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
145         [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
146         [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
147         [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
148         [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
149         [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
150         [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
151         [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
152         [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
153         [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
154         [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
155         [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
156         [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
157         [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
158         [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
159         [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
160         [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
161         [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
162         [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
163         [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
164         [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
165         [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
166         [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
167         [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
168         [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
169         [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
170         [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
171         [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
172         [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
173         [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
174         [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
175         [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
176         [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
177 };
178
179 static const struct pci_device_id bnxt_pci_tbl[] = {
180         { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
181         { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
182         { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
183         { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
184         { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
185         { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
186         { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
187         { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
188         { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
189         { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
190         { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
191         { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
192         { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
193         { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
194         { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
195         { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
196         { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
197         { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
198         { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
199         { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
200         { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
201         { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
202         { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
203         { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
204         { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
205         { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
206         { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
207         { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
208         { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
209         { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
210         { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
211         { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
212         { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
213         { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
214         { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
215         { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
216         { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
217         { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
218         { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
219         { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
220         { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
221         { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
222         { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
223         { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
224         { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
225         { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
226 #ifdef CONFIG_BNXT_SRIOV
227         { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
228         { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
229         { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
230         { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
231         { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
232         { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
233         { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
234         { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
235         { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
236         { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
237         { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
238 #endif
239         { 0 }
240 };
241
242 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
243
244 static const u16 bnxt_vf_req_snif[] = {
245         HWRM_FUNC_CFG,
246         HWRM_FUNC_VF_CFG,
247         HWRM_PORT_PHY_QCFG,
248         HWRM_CFA_L2_FILTER_ALLOC,
249 };
250
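/* Asynchronous firmware event IDs that the driver handles. */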
251 static const u16 bnxt_async_events_arr[] = {
252         ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
253         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
254         ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
255         ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
256         ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
257         ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
258         ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
259         ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
260         ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
261 };
262
263 static struct workqueue_struct *bnxt_pf_wq;
264
265 static bool bnxt_vf_pciid(enum board_idx idx)
266 {
267         return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
268                 idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF);
269 }
270
271 #define DB_CP_REARM_FLAGS       (DB_KEY_CP | DB_IDX_VALID)
272 #define DB_CP_FLAGS             (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
273 #define DB_CP_IRQ_DIS_FLAGS     (DB_KEY_CP | DB_IRQ_DIS)
274
275 #define BNXT_CP_DB_IRQ_DIS(db)                                          \
276                 writel(DB_CP_IRQ_DIS_FLAGS, db)
277
278 #define BNXT_DB_CQ(db, idx)                                             \
279         writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
280
281 #define BNXT_DB_NQ_P5(db, idx)                                          \
282         writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)
283
284 #define BNXT_DB_CQ_ARM(db, idx)                                         \
285         writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
286
287 #define BNXT_DB_NQ_ARM_P5(db, idx)                                      \
288         writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)
289
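/* Ring the notification/completion queue doorbell.  P5 chips use the
 * 64-bit doorbell format keyed by db_key64; older chips use the legacy
 * 32-bit completion ring doorbell.
 */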
290 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
291 {
292         if (bp->flags & BNXT_FLAG_CHIP_P5)
293                 BNXT_DB_NQ_P5(db, idx);
294         else
295                 BNXT_DB_CQ(db, idx);
296 }
297
298 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
299 {
300         if (bp->flags & BNXT_FLAG_CHIP_P5)
301                 BNXT_DB_NQ_ARM_P5(db, idx);
302         else
303                 BNXT_DB_CQ_ARM(db, idx);
304 }
305
306 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
307 {
308         if (bp->flags & BNXT_FLAG_CHIP_P5)
309                 writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
310                        db->doorbell);
311         else
312                 BNXT_DB_CQ(db, idx);
313 }
314
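/* TX length hint lookup table, indexed by packet length in 512-byte units. */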
315 const u16 bnxt_lhint_arr[] = {
316         TX_BD_FLAGS_LHINT_512_AND_SMALLER,
317         TX_BD_FLAGS_LHINT_512_TO_1023,
318         TX_BD_FLAGS_LHINT_1024_TO_2047,
319         TX_BD_FLAGS_LHINT_1024_TO_2047,
320         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
321         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
322         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
323         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
324         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
325         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
326         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
327         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
328         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
329         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
330         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
331         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
332         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
333         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
334         TX_BD_FLAGS_LHINT_2048_AND_LARGER,
335 };
336
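/* Return the CFA action (hardware port ID) carried in the skb's
 * HW_PORT_MUX metadata dst, or 0 if the skb has none.
 */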
337 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
338 {
339         struct metadata_dst *md_dst = skb_metadata_dst(skb);
340
341         if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
342                 return 0;
343
344         return md_dst->u.port_info.port_id;
345 }
346
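/* Main transmit routine.  Packets small enough to fit under the push
 * threshold (on an otherwise empty ring) are written directly through
 * the doorbell BAR; all other packets are DMA-mapped and described with
 * long TX BDs before the doorbell is rung.
 */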
347 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
348 {
349         struct bnxt *bp = netdev_priv(dev);
350         struct tx_bd *txbd;
351         struct tx_bd_ext *txbd1;
352         struct netdev_queue *txq;
353         int i;
354         dma_addr_t mapping;
355         unsigned int length, pad = 0;
356         u32 len, free_size, vlan_tag_flags, cfa_action, flags;
357         u16 prod, last_frag;
358         struct pci_dev *pdev = bp->pdev;
359         struct bnxt_tx_ring_info *txr;
360         struct bnxt_sw_tx_bd *tx_buf;
361
362         i = skb_get_queue_mapping(skb);
363         if (unlikely(i >= bp->tx_nr_rings)) {
364                 dev_kfree_skb_any(skb);
365                 return NETDEV_TX_OK;
366         }
367
368         txq = netdev_get_tx_queue(dev, i);
369         txr = &bp->tx_ring[bp->tx_ring_map[i]];
370         prod = txr->tx_prod;
371
372         free_size = bnxt_tx_avail(bp, txr);
373         if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
374                 netif_tx_stop_queue(txq);
375                 return NETDEV_TX_BUSY;
376         }
377
378         length = skb->len;
379         len = skb_headlen(skb);
380         last_frag = skb_shinfo(skb)->nr_frags;
381
382         txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
383
384         txbd->tx_bd_opaque = prod;
385
386         tx_buf = &txr->tx_buf_ring[prod];
387         tx_buf->skb = skb;
388         tx_buf->nr_frags = last_frag;
389
390         vlan_tag_flags = 0;
391         cfa_action = bnxt_xmit_get_cfa_action(skb);
392         if (skb_vlan_tag_present(skb)) {
393                 vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
394                                  skb_vlan_tag_get(skb);
 395                 /* Currently supports 802.1Q and 802.1AD VLAN offloads;
 396                  * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
 397                  */
398                 if (skb->vlan_proto == htons(ETH_P_8021Q))
399                         vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
400         }
401
402         if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
403                 struct tx_push_buffer *tx_push_buf = txr->tx_push;
404                 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
405                 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
406                 void __iomem *db = txr->tx_db.doorbell;
407                 void *pdata = tx_push_buf->data;
408                 u64 *end;
409                 int j, push_len;
410
411                 /* Set COAL_NOW to be ready quickly for the next push */
412                 tx_push->tx_bd_len_flags_type =
413                         cpu_to_le32((length << TX_BD_LEN_SHIFT) |
414                                         TX_BD_TYPE_LONG_TX_BD |
415                                         TX_BD_FLAGS_LHINT_512_AND_SMALLER |
416                                         TX_BD_FLAGS_COAL_NOW |
417                                         TX_BD_FLAGS_PACKET_END |
418                                         (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
419
420                 if (skb->ip_summed == CHECKSUM_PARTIAL)
421                         tx_push1->tx_bd_hsize_lflags =
422                                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
423                 else
424                         tx_push1->tx_bd_hsize_lflags = 0;
425
426                 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
427                 tx_push1->tx_bd_cfa_action =
428                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
429
430                 end = pdata + length;
431                 end = PTR_ALIGN(end, 8) - 1;
432                 *end = 0;
433
434                 skb_copy_from_linear_data(skb, pdata, len);
435                 pdata += len;
436                 for (j = 0; j < last_frag; j++) {
437                         skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
438                         void *fptr;
439
440                         fptr = skb_frag_address_safe(frag);
441                         if (!fptr)
442                                 goto normal_tx;
443
444                         memcpy(pdata, fptr, skb_frag_size(frag));
445                         pdata += skb_frag_size(frag);
446                 }
447
448                 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
449                 txbd->tx_bd_haddr = txr->data_mapping;
450                 prod = NEXT_TX(prod);
451                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
452                 memcpy(txbd, tx_push1, sizeof(*txbd));
453                 prod = NEXT_TX(prod);
454                 tx_push->doorbell =
455                         cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
456                 txr->tx_prod = prod;
457
458                 tx_buf->is_push = 1;
459                 netdev_tx_sent_queue(txq, skb->len);
460                 wmb();  /* Sync is_push and byte queue before pushing data */
461
462                 push_len = (length + sizeof(*tx_push) + 7) / 8;
463                 if (push_len > 16) {
464                         __iowrite64_copy(db, tx_push_buf, 16);
465                         __iowrite32_copy(db + 4, tx_push_buf + 1,
466                                          (push_len - 16) << 1);
467                 } else {
468                         __iowrite64_copy(db, tx_push_buf, push_len);
469                 }
470
471                 goto tx_done;
472         }
473
474 normal_tx:
475         if (length < BNXT_MIN_PKT_SIZE) {
476                 pad = BNXT_MIN_PKT_SIZE - length;
477                 if (skb_pad(skb, pad)) {
478                         /* SKB already freed. */
479                         tx_buf->skb = NULL;
480                         return NETDEV_TX_OK;
481                 }
482                 length = BNXT_MIN_PKT_SIZE;
483         }
484
485         mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
486
487         if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
488                 dev_kfree_skb_any(skb);
489                 tx_buf->skb = NULL;
490                 return NETDEV_TX_OK;
491         }
492
493         dma_unmap_addr_set(tx_buf, mapping, mapping);
494         flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
495                 ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
496
497         txbd->tx_bd_haddr = cpu_to_le64(mapping);
498
499         prod = NEXT_TX(prod);
500         txbd1 = (struct tx_bd_ext *)
501                 &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
502
503         txbd1->tx_bd_hsize_lflags = 0;
504         if (skb_is_gso(skb)) {
505                 u32 hdr_len;
506
507                 if (skb->encapsulation)
508                         hdr_len = skb_inner_network_offset(skb) +
509                                 skb_inner_network_header_len(skb) +
510                                 inner_tcp_hdrlen(skb);
511                 else
512                         hdr_len = skb_transport_offset(skb) +
513                                 tcp_hdrlen(skb);
514
515                 txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
516                                         TX_BD_FLAGS_T_IPID |
517                                         (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
518                 length = skb_shinfo(skb)->gso_size;
519                 txbd1->tx_bd_mss = cpu_to_le32(length);
520                 length += hdr_len;
521         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
522                 txbd1->tx_bd_hsize_lflags =
523                         cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
524                 txbd1->tx_bd_mss = 0;
525         }
526
527         length >>= 9;
528         if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
529                 dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
530                                      skb->len);
531                 i = 0;
532                 goto tx_dma_error;
533         }
534         flags |= bnxt_lhint_arr[length];
535         txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
536
537         txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
538         txbd1->tx_bd_cfa_action =
539                         cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
540         for (i = 0; i < last_frag; i++) {
541                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
542
543                 prod = NEXT_TX(prod);
544                 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
545
546                 len = skb_frag_size(frag);
547                 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
548                                            DMA_TO_DEVICE);
549
550                 if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
551                         goto tx_dma_error;
552
553                 tx_buf = &txr->tx_buf_ring[prod];
554                 dma_unmap_addr_set(tx_buf, mapping, mapping);
555
556                 txbd->tx_bd_haddr = cpu_to_le64(mapping);
557
558                 flags = len << TX_BD_LEN_SHIFT;
559                 txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
560         }
561
562         flags &= ~TX_BD_LEN;
563         txbd->tx_bd_len_flags_type =
564                 cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
565                             TX_BD_FLAGS_PACKET_END);
566
567         netdev_tx_sent_queue(txq, skb->len);
568
569         /* Sync BD data before updating doorbell */
570         wmb();
571
572         prod = NEXT_TX(prod);
573         txr->tx_prod = prod;
574
575         if (!netdev_xmit_more() || netif_xmit_stopped(txq))
576                 bnxt_db_write(bp, &txr->tx_db, prod);
577
578 tx_done:
579
580         if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
581                 if (netdev_xmit_more() && !tx_buf->is_push)
582                         bnxt_db_write(bp, &txr->tx_db, prod);
583
584                 netif_tx_stop_queue(txq);
585
586                 /* netif_tx_stop_queue() must be done before checking
587                  * tx index in bnxt_tx_avail() below, because in
588                  * bnxt_tx_int(), we update tx index before checking for
589                  * netif_tx_queue_stopped().
590                  */
591                 smp_mb();
592                 if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
593                         netif_tx_wake_queue(txq);
594         }
595         return NETDEV_TX_OK;
596
597 tx_dma_error:
598         last_frag = i;
599
600         /* start back at beginning and unmap skb */
601         prod = txr->tx_prod;
602         tx_buf = &txr->tx_buf_ring[prod];
603         tx_buf->skb = NULL;
604         dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
605                          skb_headlen(skb), PCI_DMA_TODEVICE);
606         prod = NEXT_TX(prod);
607
608         /* unmap remaining mapped pages */
609         for (i = 0; i < last_frag; i++) {
610                 prod = NEXT_TX(prod);
611                 tx_buf = &txr->tx_buf_ring[prod];
612                 dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
613                                skb_frag_size(&skb_shinfo(skb)->frags[i]),
614                                PCI_DMA_TODEVICE);
615         }
616
617         dev_kfree_skb_any(skb);
618         return NETDEV_TX_OK;
619 }
620
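/* Reclaim up to @nr_pkts completed TX packets: unmap their buffers, free
 * the skbs, advance the consumer index and wake the TX queue if it was
 * stopped and enough descriptors are now free.
 */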
621 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
622 {
623         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
624         struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
625         u16 cons = txr->tx_cons;
626         struct pci_dev *pdev = bp->pdev;
627         int i;
628         unsigned int tx_bytes = 0;
629
630         for (i = 0; i < nr_pkts; i++) {
631                 struct bnxt_sw_tx_bd *tx_buf;
632                 struct sk_buff *skb;
633                 int j, last;
634
635                 tx_buf = &txr->tx_buf_ring[cons];
636                 cons = NEXT_TX(cons);
637                 skb = tx_buf->skb;
638                 tx_buf->skb = NULL;
639
640                 if (tx_buf->is_push) {
641                         tx_buf->is_push = 0;
642                         goto next_tx_int;
643                 }
644
645                 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
646                                  skb_headlen(skb), PCI_DMA_TODEVICE);
647                 last = tx_buf->nr_frags;
648
649                 for (j = 0; j < last; j++) {
650                         cons = NEXT_TX(cons);
651                         tx_buf = &txr->tx_buf_ring[cons];
652                         dma_unmap_page(
653                                 &pdev->dev,
654                                 dma_unmap_addr(tx_buf, mapping),
655                                 skb_frag_size(&skb_shinfo(skb)->frags[j]),
656                                 PCI_DMA_TODEVICE);
657                 }
658
659 next_tx_int:
660                 cons = NEXT_TX(cons);
661
662                 tx_bytes += skb->len;
663                 dev_kfree_skb_any(skb);
664         }
665
666         netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
667         txr->tx_cons = cons;
668
669         /* Need to make the tx_cons update visible to bnxt_start_xmit()
670          * before checking for netif_tx_queue_stopped().  Without the
671          * memory barrier, there is a small possibility that bnxt_start_xmit()
672          * will miss it and cause the queue to be stopped forever.
673          */
674         smp_mb();
675
676         if (unlikely(netif_tx_queue_stopped(txq)) &&
677             (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
678                 __netif_tx_lock(txq, smp_processor_id());
679                 if (netif_tx_queue_stopped(txq) &&
680                     bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
681                     txr->dev_state != BNXT_DEV_STATE_CLOSING)
682                         netif_tx_wake_queue(txq);
683                 __netif_tx_unlock(txq);
684         }
685 }
686
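/* Allocate an RX page from the ring's page_pool and DMA-map it,
 * returning the mapping adjusted by the RX DMA offset.
 */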
687 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
688                                          struct bnxt_rx_ring_info *rxr,
689                                          gfp_t gfp)
690 {
691         struct device *dev = &bp->pdev->dev;
692         struct page *page;
693
694         page = page_pool_dev_alloc_pages(rxr->page_pool);
695         if (!page)
696                 return NULL;
697
698         *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
699                                       DMA_ATTR_WEAK_ORDERING);
700         if (dma_mapping_error(dev, *mapping)) {
701                 page_pool_recycle_direct(rxr->page_pool, page);
702                 return NULL;
703         }
704         *mapping += bp->rx_dma_offset;
705         return page;
706 }
707
708 static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
709                                        gfp_t gfp)
710 {
711         u8 *data;
712         struct pci_dev *pdev = bp->pdev;
713
714         data = kmalloc(bp->rx_buf_size, gfp);
715         if (!data)
716                 return NULL;
717
718         *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
719                                         bp->rx_buf_use_size, bp->rx_dir,
720                                         DMA_ATTR_WEAK_ORDERING);
721
722         if (dma_mapping_error(&pdev->dev, *mapping)) {
723                 kfree(data);
724                 data = NULL;
725         }
726         return data;
727 }
728
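/* Allocate and map an RX buffer (a page in page mode, otherwise a
 * kmalloc'd data buffer) and write its DMA address into the RX
 * descriptor at @prod.
 */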
729 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
730                        u16 prod, gfp_t gfp)
731 {
732         struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
733         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
734         dma_addr_t mapping;
735
736         if (BNXT_RX_PAGE_MODE(bp)) {
737                 struct page *page =
738                         __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
739
740                 if (!page)
741                         return -ENOMEM;
742
743                 rx_buf->data = page;
744                 rx_buf->data_ptr = page_address(page) + bp->rx_offset;
745         } else {
746                 u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
747
748                 if (!data)
749                         return -ENOMEM;
750
751                 rx_buf->data = data;
752                 rx_buf->data_ptr = data + bp->rx_offset;
753         }
754         rx_buf->mapping = mapping;
755
756         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
757         return 0;
758 }
759
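/* Recycle the buffer at consumer index @cons back to the current
 * producer slot without unmapping or reallocating it.
 */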
760 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
761 {
762         u16 prod = rxr->rx_prod;
763         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
764         struct rx_bd *cons_bd, *prod_bd;
765
766         prod_rx_buf = &rxr->rx_buf_ring[prod];
767         cons_rx_buf = &rxr->rx_buf_ring[cons];
768
769         prod_rx_buf->data = data;
770         prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
771
772         prod_rx_buf->mapping = cons_rx_buf->mapping;
773
774         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
775         cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
776
777         prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
778 }
779
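/* Find the next free slot in the aggregation buffer bitmap, wrapping
 * back to the start of the bitmap if necessary.
 */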
780 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
781 {
782         u16 next, max = rxr->rx_agg_bmap_size;
783
784         next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
785         if (next >= max)
786                 next = find_first_zero_bit(rxr->rx_agg_bmap, max);
787         return next;
788 }
789
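/* Allocate and map a buffer for the RX aggregation ring.  When the
 * system PAGE_SIZE is larger than BNXT_RX_PAGE_SIZE, one page is carved
 * into multiple aggregation buffers.
 */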
790 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
791                                      struct bnxt_rx_ring_info *rxr,
792                                      u16 prod, gfp_t gfp)
793 {
794         struct rx_bd *rxbd =
795                 &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
796         struct bnxt_sw_rx_agg_bd *rx_agg_buf;
797         struct pci_dev *pdev = bp->pdev;
798         struct page *page;
799         dma_addr_t mapping;
800         u16 sw_prod = rxr->rx_sw_agg_prod;
801         unsigned int offset = 0;
802
803         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
804                 page = rxr->rx_page;
805                 if (!page) {
806                         page = alloc_page(gfp);
807                         if (!page)
808                                 return -ENOMEM;
809                         rxr->rx_page = page;
810                         rxr->rx_page_offset = 0;
811                 }
812                 offset = rxr->rx_page_offset;
813                 rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
814                 if (rxr->rx_page_offset == PAGE_SIZE)
815                         rxr->rx_page = NULL;
816                 else
817                         get_page(page);
818         } else {
819                 page = alloc_page(gfp);
820                 if (!page)
821                         return -ENOMEM;
822         }
823
824         mapping = dma_map_page_attrs(&pdev->dev, page, offset,
825                                      BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
826                                      DMA_ATTR_WEAK_ORDERING);
827         if (dma_mapping_error(&pdev->dev, mapping)) {
828                 __free_page(page);
829                 return -EIO;
830         }
831
832         if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
833                 sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
834
835         __set_bit(sw_prod, rxr->rx_agg_bmap);
836         rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
837         rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
838
839         rx_agg_buf->page = page;
840         rx_agg_buf->offset = offset;
841         rx_agg_buf->mapping = mapping;
842         rxbd->rx_bd_haddr = cpu_to_le64(mapping);
843         rxbd->rx_bd_opaque = sw_prod;
844         return 0;
845 }
846
847 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
848                                        struct bnxt_cp_ring_info *cpr,
849                                        u16 cp_cons, u16 curr)
850 {
851         struct rx_agg_cmp *agg;
852
853         cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
854         agg = (struct rx_agg_cmp *)
855                 &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
856         return agg;
857 }
858
859 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
860                                               struct bnxt_rx_ring_info *rxr,
861                                               u16 agg_id, u16 curr)
862 {
863         struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
864
865         return &tpa_info->agg_arr[curr];
866 }
867
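/* Return @agg_bufs aggregation buffers, starting at completion entry
 * @start, to the aggregation ring so they can be reused (e.g. when the
 * packet they belonged to is dropped).
 */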
868 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
869                                    u16 start, u32 agg_bufs, bool tpa)
870 {
871         struct bnxt_napi *bnapi = cpr->bnapi;
872         struct bnxt *bp = bnapi->bp;
873         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
874         u16 prod = rxr->rx_agg_prod;
875         u16 sw_prod = rxr->rx_sw_agg_prod;
876         bool p5_tpa = false;
877         u32 i;
878
879         if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
880                 p5_tpa = true;
881
882         for (i = 0; i < agg_bufs; i++) {
883                 u16 cons;
884                 struct rx_agg_cmp *agg;
885                 struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
886                 struct rx_bd *prod_bd;
887                 struct page *page;
888
889                 if (p5_tpa)
890                         agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
891                 else
892                         agg = bnxt_get_agg(bp, cpr, idx, start + i);
893                 cons = agg->rx_agg_cmp_opaque;
894                 __clear_bit(cons, rxr->rx_agg_bmap);
895
896                 if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
897                         sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
898
899                 __set_bit(sw_prod, rxr->rx_agg_bmap);
900                 prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
901                 cons_rx_buf = &rxr->rx_agg_ring[cons];
902
903                 /* It is possible for sw_prod to be equal to cons, so
904                  * set cons_rx_buf->page to NULL first.
905                  */
906                 page = cons_rx_buf->page;
907                 cons_rx_buf->page = NULL;
908                 prod_rx_buf->page = page;
909                 prod_rx_buf->offset = cons_rx_buf->offset;
910
911                 prod_rx_buf->mapping = cons_rx_buf->mapping;
912
913                 prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
914
915                 prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
916                 prod_bd->rx_bd_opaque = sw_prod;
917
918                 prod = NEXT_RX_AGG(prod);
919                 sw_prod = NEXT_RX_AGG(sw_prod);
920         }
921         rxr->rx_agg_prod = prod;
922         rxr->rx_sw_agg_prod = sw_prod;
923 }
924
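/* Build an skb for a page-mode RX buffer: copy the packet headers into
 * the skb's linear area and attach the rest of the page as a fragment.
 */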
925 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
926                                         struct bnxt_rx_ring_info *rxr,
927                                         u16 cons, void *data, u8 *data_ptr,
928                                         dma_addr_t dma_addr,
929                                         unsigned int offset_and_len)
930 {
931         unsigned int payload = offset_and_len >> 16;
932         unsigned int len = offset_and_len & 0xffff;
933         skb_frag_t *frag;
934         struct page *page = data;
935         u16 prod = rxr->rx_prod;
936         struct sk_buff *skb;
937         int off, err;
938
939         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
940         if (unlikely(err)) {
941                 bnxt_reuse_rx_data(rxr, cons, data);
942                 return NULL;
943         }
944         dma_addr -= bp->rx_dma_offset;
945         dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
946                              DMA_ATTR_WEAK_ORDERING);
947
948         if (unlikely(!payload))
949                 payload = eth_get_headlen(bp->dev, data_ptr, len);
950
951         skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
952         if (!skb) {
953                 __free_page(page);
954                 return NULL;
955         }
956
957         off = (void *)data_ptr - page_address(page);
958         skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
959         memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
960                payload + NET_IP_ALIGN);
961
962         frag = &skb_shinfo(skb)->frags[0];
963         skb_frag_size_sub(frag, payload);
964         skb_frag_off_add(frag, payload);
965         skb->data_len -= payload;
966         skb->tail += payload;
967
968         return skb;
969 }
970
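/* Build an skb around a normal (non page-mode) RX buffer with
 * build_skb() after posting a replacement buffer to the ring.
 */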
971 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
972                                    struct bnxt_rx_ring_info *rxr, u16 cons,
973                                    void *data, u8 *data_ptr,
974                                    dma_addr_t dma_addr,
975                                    unsigned int offset_and_len)
976 {
977         u16 prod = rxr->rx_prod;
978         struct sk_buff *skb;
979         int err;
980
981         err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
982         if (unlikely(err)) {
983                 bnxt_reuse_rx_data(rxr, cons, data);
984                 return NULL;
985         }
986
987         skb = build_skb(data, 0);
988         dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
989                                bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
990         if (!skb) {
991                 kfree(data);
992                 return NULL;
993         }
994
995         skb_reserve(skb, bp->rx_offset);
996         skb_put(skb, offset_and_len & 0xffff);
997         return skb;
998 }
999
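/* Attach the packet's aggregation buffers to @skb as page fragments,
 * unmapping each page and replenishing the aggregation ring as we go.
 */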
1000 static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
1001                                      struct bnxt_cp_ring_info *cpr,
1002                                      struct sk_buff *skb, u16 idx,
1003                                      u32 agg_bufs, bool tpa)
1004 {
1005         struct bnxt_napi *bnapi = cpr->bnapi;
1006         struct pci_dev *pdev = bp->pdev;
1007         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1008         u16 prod = rxr->rx_agg_prod;
1009         bool p5_tpa = false;
1010         u32 i;
1011
1012         if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1013                 p5_tpa = true;
1014
1015         for (i = 0; i < agg_bufs; i++) {
1016                 u16 cons, frag_len;
1017                 struct rx_agg_cmp *agg;
1018                 struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1019                 struct page *page;
1020                 dma_addr_t mapping;
1021
1022                 if (p5_tpa)
1023                         agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1024                 else
1025                         agg = bnxt_get_agg(bp, cpr, idx, i);
1026                 cons = agg->rx_agg_cmp_opaque;
1027                 frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1028                             RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1029
1030                 cons_rx_buf = &rxr->rx_agg_ring[cons];
1031                 skb_fill_page_desc(skb, i, cons_rx_buf->page,
1032                                    cons_rx_buf->offset, frag_len);
1033                 __clear_bit(cons, rxr->rx_agg_bmap);
1034
1035                 /* It is possible for bnxt_alloc_rx_page() to allocate
1036                  * a sw_prod index that equals the cons index, so we
1037                  * need to clear the cons entry now.
1038                  */
1039                 mapping = cons_rx_buf->mapping;
1040                 page = cons_rx_buf->page;
1041                 cons_rx_buf->page = NULL;
1042
1043                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1044                         struct skb_shared_info *shinfo;
1045                         unsigned int nr_frags;
1046
1047                         shinfo = skb_shinfo(skb);
1048                         nr_frags = --shinfo->nr_frags;
1049                         __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1050
1051                         dev_kfree_skb(skb);
1052
1053                         cons_rx_buf->page = page;
1054
1055                         /* Update prod since possibly some pages have been
1056                          * allocated already.
1057                          */
1058                         rxr->rx_agg_prod = prod;
1059                         bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1060                         return NULL;
1061                 }
1062
1063                 dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1064                                      PCI_DMA_FROMDEVICE,
1065                                      DMA_ATTR_WEAK_ORDERING);
1066
1067                 skb->data_len += frag_len;
1068                 skb->len += frag_len;
1069                 skb->truesize += PAGE_SIZE;
1070
1071                 prod = NEXT_RX_AGG(prod);
1072         }
1073         rxr->rx_agg_prod = prod;
1074         return skb;
1075 }
1076
1077 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1078                                u8 agg_bufs, u32 *raw_cons)
1079 {
1080         u16 last;
1081         struct rx_agg_cmp *agg;
1082
1083         *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1084         last = RING_CMP(*raw_cons);
1085         agg = (struct rx_agg_cmp *)
1086                 &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1087         return RX_AGG_CMP_VALID(agg, *raw_cons);
1088 }
1089
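/* Copy-break path: copy a small packet into a freshly allocated skb so
 * the original RX buffer can be reused in place.
 */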
1090 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1091                                             unsigned int len,
1092                                             dma_addr_t mapping)
1093 {
1094         struct bnxt *bp = bnapi->bp;
1095         struct pci_dev *pdev = bp->pdev;
1096         struct sk_buff *skb;
1097
1098         skb = napi_alloc_skb(&bnapi->napi, len);
1099         if (!skb)
1100                 return NULL;
1101
1102         dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1103                                 bp->rx_dir);
1104
1105         memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1106                len + NET_IP_ALIGN);
1107
1108         dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1109                                    bp->rx_dir);
1110
1111         skb_put(skb, len);
1112         return skb;
1113 }
1114
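/* Advance the raw completion ring consumer past a packet that is being
 * discarded, including any aggregation completions that belong to it.
 * Returns -EBUSY if those completions have not all arrived yet.
 */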
1115 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1116                            u32 *raw_cons, void *cmp)
1117 {
1118         struct rx_cmp *rxcmp = cmp;
1119         u32 tmp_raw_cons = *raw_cons;
1120         u8 cmp_type, agg_bufs = 0;
1121
1122         cmp_type = RX_CMP_TYPE(rxcmp);
1123
1124         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1125                 agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1126                             RX_CMP_AGG_BUFS) >>
1127                            RX_CMP_AGG_BUFS_SHIFT;
1128         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1129                 struct rx_tpa_end_cmp *tpa_end = cmp;
1130
1131                 if (bp->flags & BNXT_FLAG_CHIP_P5)
1132                         return 0;
1133
1134                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1135         }
1136
1137         if (agg_bufs) {
1138                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1139                         return -EBUSY;
1140         }
1141         *raw_cons = tmp_raw_cons;
1142         return 0;
1143 }
1144
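/* PF work items go on the driver's dedicated bnxt_pf_wq workqueue; VFs
 * use the system workqueues.
 */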
1145 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1146 {
1147         if (BNXT_PF(bp))
1148                 queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1149         else
1150                 schedule_delayed_work(&bp->fw_reset_task, delay);
1151 }
1152
1153 static void bnxt_queue_sp_work(struct bnxt *bp)
1154 {
1155         if (BNXT_PF(bp))
1156                 queue_work(bnxt_pf_wq, &bp->sp_task);
1157         else
1158                 schedule_work(&bp->sp_task);
1159 }
1160
1161 static void bnxt_cancel_sp_work(struct bnxt *bp)
1162 {
1163         if (BNXT_PF(bp))
1164                 flush_workqueue(bnxt_pf_wq);
1165         else
1166                 cancel_work_sync(&bp->sp_task);
1167 }
1168
1169 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1170 {
1171         if (!rxr->bnapi->in_reset) {
1172                 rxr->bnapi->in_reset = true;
1173                 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1174                 bnxt_queue_sp_work(bp);
1175         }
1176         rxr->rx_next_cons = 0xffff;
1177 }
1178
1179 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1180 {
1181         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1182         u16 idx = agg_id & MAX_TPA_P5_MASK;
1183
1184         if (test_bit(idx, map->agg_idx_bmap))
1185                 idx = find_first_zero_bit(map->agg_idx_bmap,
1186                                           BNXT_AGG_IDX_BMAP_SIZE);
1187         __set_bit(idx, map->agg_idx_bmap);
1188         map->agg_id_tbl[agg_id] = idx;
1189         return idx;
1190 }
1191
1192 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1193 {
1194         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1195
1196         __clear_bit(idx, map->agg_idx_bmap);
1197 }
1198
1199 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1200 {
1201         struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1202
1203         return map->agg_id_tbl[agg_id];
1204 }
1205
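/* Handle a TPA_START completion: stash the current RX buffer in the
 * per-aggregation tpa_info so the aggregated packet can be completed
 * later, and give the ring the buffer previously held by the tpa_info.
 */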
1206 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1207                            struct rx_tpa_start_cmp *tpa_start,
1208                            struct rx_tpa_start_cmp_ext *tpa_start1)
1209 {
1210         struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1211         struct bnxt_tpa_info *tpa_info;
1212         u16 cons, prod, agg_id;
1213         struct rx_bd *prod_bd;
1214         dma_addr_t mapping;
1215
1216         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1217                 agg_id = TPA_START_AGG_ID_P5(tpa_start);
1218                 agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1219         } else {
1220                 agg_id = TPA_START_AGG_ID(tpa_start);
1221         }
1222         cons = tpa_start->rx_tpa_start_cmp_opaque;
1223         prod = rxr->rx_prod;
1224         cons_rx_buf = &rxr->rx_buf_ring[cons];
1225         prod_rx_buf = &rxr->rx_buf_ring[prod];
1226         tpa_info = &rxr->rx_tpa[agg_id];
1227
1228         if (unlikely(cons != rxr->rx_next_cons ||
1229                      TPA_START_ERROR(tpa_start))) {
1230                 netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1231                             cons, rxr->rx_next_cons,
1232                             TPA_START_ERROR_CODE(tpa_start1));
1233                 bnxt_sched_reset(bp, rxr);
1234                 return;
1235         }
1236         /* Store cfa_code in tpa_info to use in tpa_end
1237          * completion processing.
1238          */
1239         tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1240         prod_rx_buf->data = tpa_info->data;
1241         prod_rx_buf->data_ptr = tpa_info->data_ptr;
1242
1243         mapping = tpa_info->mapping;
1244         prod_rx_buf->mapping = mapping;
1245
1246         prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1247
1248         prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1249
1250         tpa_info->data = cons_rx_buf->data;
1251         tpa_info->data_ptr = cons_rx_buf->data_ptr;
1252         cons_rx_buf->data = NULL;
1253         tpa_info->mapping = cons_rx_buf->mapping;
1254
1255         tpa_info->len =
1256                 le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1257                                 RX_TPA_START_CMP_LEN_SHIFT;
1258         if (likely(TPA_START_HASH_VALID(tpa_start))) {
1259                 u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1260
1261                 tpa_info->hash_type = PKT_HASH_TYPE_L4;
1262                 tpa_info->gso_type = SKB_GSO_TCPV4;
1263                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1264                 if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1265                         tpa_info->gso_type = SKB_GSO_TCPV6;
1266                 tpa_info->rss_hash =
1267                         le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1268         } else {
1269                 tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1270                 tpa_info->gso_type = 0;
1271                 if (netif_msg_rx_err(bp))
1272                         netdev_warn(bp->dev, "TPA packet without valid hash\n");
1273         }
1274         tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1275         tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1276         tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1277         tpa_info->agg_count = 0;
1278
1279         rxr->rx_prod = NEXT_RX(prod);
1280         cons = NEXT_RX(cons);
1281         rxr->rx_next_cons = NEXT_RX(cons);
1282         cons_rx_buf = &rxr->rx_buf_ring[cons];
1283
1284         bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1285         rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1286         cons_rx_buf->data = NULL;
1287 }
1288
1289 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1290 {
1291         if (agg_bufs)
1292                 bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1293 }
1294
1295 #ifdef CONFIG_INET
1296 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1297 {
1298         struct udphdr *uh = NULL;
1299
1300         if (ip_proto == htons(ETH_P_IP)) {
1301                 struct iphdr *iph = (struct iphdr *)skb->data;
1302
1303                 if (iph->protocol == IPPROTO_UDP)
1304                         uh = (struct udphdr *)(iph + 1);
1305         } else {
1306                 struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1307
1308                 if (iph->nexthdr == IPPROTO_UDP)
1309                         uh = (struct udphdr *)(iph + 1);
1310         }
1311         if (uh) {
1312                 if (uh->check)
1313                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1314                 else
1315                         skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1316         }
1317 }
1318 #endif
1319
1320 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1321                                            int payload_off, int tcp_ts,
1322                                            struct sk_buff *skb)
1323 {
1324 #ifdef CONFIG_INET
1325         struct tcphdr *th;
1326         int len, nw_off;
1327         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1328         u32 hdr_info = tpa_info->hdr_info;
1329         bool loopback = false;
1330
1331         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1332         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1333         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1334
1335         /* If the packet is an internal loopback packet, the offsets will
1336          * have an extra 4 bytes.
1337          */
1338         if (inner_mac_off == 4) {
1339                 loopback = true;
1340         } else if (inner_mac_off > 4) {
1341                 __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1342                                             ETH_HLEN - 2));
1343
 1344                 /* We only support inner IPv4/IPv6.  If we don't see the
1345                  * correct protocol ID, it must be a loopback packet where
1346                  * the offsets are off by 4.
1347                  */
1348                 if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1349                         loopback = true;
1350         }
1351         if (loopback) {
1352                 /* internal loopback packet, subtract all offsets by 4 */
1353                 inner_ip_off -= 4;
1354                 inner_mac_off -= 4;
1355                 outer_ip_off -= 4;
1356         }
1357
1358         nw_off = inner_ip_off - ETH_HLEN;
1359         skb_set_network_header(skb, nw_off);
1360         if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1361                 struct ipv6hdr *iph = ipv6_hdr(skb);
1362
1363                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1364                 len = skb->len - skb_transport_offset(skb);
1365                 th = tcp_hdr(skb);
1366                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1367         } else {
1368                 struct iphdr *iph = ip_hdr(skb);
1369
1370                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1371                 len = skb->len - skb_transport_offset(skb);
1372                 th = tcp_hdr(skb);
1373                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1374         }
1375
1376         if (inner_mac_off) { /* tunnel */
1377                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1378                                             ETH_HLEN - 2));
1379
1380                 bnxt_gro_tunnel(skb, proto);
1381         }
1382 #endif
1383         return skb;
1384 }
1385
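     /* GRO completion fixup for 5750x (P5) chips.  Only the network and
      * transport header offsets need to be set from the saved hdr_info; the
      * TCP pseudo-header checksum is not recomputed here, unlike the 5731x
      * path.  Tunneled packets still get their UDP tunnel GSO type set.
      */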
1386 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1387                                            int payload_off, int tcp_ts,
1388                                            struct sk_buff *skb)
1389 {
1390 #ifdef CONFIG_INET
1391         u16 outer_ip_off, inner_ip_off, inner_mac_off;
1392         u32 hdr_info = tpa_info->hdr_info;
1393         int iphdr_len, nw_off;
1394
1395         inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1396         inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1397         outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1398
1399         nw_off = inner_ip_off - ETH_HLEN;
1400         skb_set_network_header(skb, nw_off);
1401         iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1402                      sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1403         skb_set_transport_header(skb, nw_off + iphdr_len);
1404
1405         if (inner_mac_off) { /* tunnel */
1406                 __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1407                                             ETH_HLEN - 2));
1408
1409                 bnxt_gro_tunnel(skb, proto);
1410         }
1411 #endif
1412         return skb;
1413 }
1414
1415 #define BNXT_IPV4_HDR_SIZE      (sizeof(struct iphdr) + sizeof(struct tcphdr))
1416 #define BNXT_IPV6_HDR_SIZE      (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1417
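     /* GRO completion fixup for 5730x-class chips.  These chips report only
      * the payload offset in the TPA end completion, so the inner network
      * header offset is derived by subtracting the fixed IPv4/IPv6 + TCP
      * header sizes (plus 12 bytes if TCP timestamps are present) and
      * ETH_HLEN.  A non-zero result means a tunnel header precedes the inner
      * Ethernet header.
      *
      * Illustrative example (hypothetical values): for a plain IPv4/TCP
      * aggregation with timestamps, payload_off = 14 + 20 + 20 + 12 = 66 and
      * nw_off = 66 - BNXT_IPV4_HDR_SIZE - 12 - ETH_HLEN = 0, i.e. no tunnel.
      */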
1418 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1419                                            int payload_off, int tcp_ts,
1420                                            struct sk_buff *skb)
1421 {
1422 #ifdef CONFIG_INET
1423         struct tcphdr *th;
1424         int len, nw_off, tcp_opt_len = 0;
1425
1426         if (tcp_ts)
1427                 tcp_opt_len = 12;
1428
1429         if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1430                 struct iphdr *iph;
1431
1432                 nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1433                          ETH_HLEN;
1434                 skb_set_network_header(skb, nw_off);
1435                 iph = ip_hdr(skb);
1436                 skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1437                 len = skb->len - skb_transport_offset(skb);
1438                 th = tcp_hdr(skb);
1439                 th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1440         } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1441                 struct ipv6hdr *iph;
1442
1443                 nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1444                          ETH_HLEN;
1445                 skb_set_network_header(skb, nw_off);
1446                 iph = ipv6_hdr(skb);
1447                 skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1448                 len = skb->len - skb_transport_offset(skb);
1449                 th = tcp_hdr(skb);
1450                 th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1451         } else {
1452                 dev_kfree_skb_any(skb);
1453                 return NULL;
1454         }
1455
1456         if (nw_off) /* tunnel */
1457                 bnxt_gro_tunnel(skb, skb->protocol);
1458 #endif
1459         return skb;
1460 }
1461
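     /* Finish GRO processing for an aggregated TPA packet: set the GRO
      * segment count, gso_size and gso_type from the TPA end completion, call
      * the chip-specific ->gro_func() to fix up the headers, then hand the
      * skb to tcp_gro_complete().  Single-segment aggregations are returned
      * unchanged.
      */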
1462 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1463                                            struct bnxt_tpa_info *tpa_info,
1464                                            struct rx_tpa_end_cmp *tpa_end,
1465                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1466                                            struct sk_buff *skb)
1467 {
1468 #ifdef CONFIG_INET
1469         int payload_off;
1470         u16 segs;
1471
1472         segs = TPA_END_TPA_SEGS(tpa_end);
1473         if (segs == 1)
1474                 return skb;
1475
1476         NAPI_GRO_CB(skb)->count = segs;
1477         skb_shinfo(skb)->gso_size =
1478                 le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1479         skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1480         if (bp->flags & BNXT_FLAG_CHIP_P5)
1481                 payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1482         else
1483                 payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1484         skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1485         if (likely(skb))
1486                 tcp_gro_complete(skb);
1487 #endif
1488         return skb;
1489 }
1490
1491 /* Given the cfa_code of a received packet, determine which
1492  * netdev (vf-rep or PF) the packet is destined to.
1493  */
1494 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1495 {
1496         struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1497
1498         /* if vf-rep dev is NULL, the packet must belong to the PF */
1499         return dev ? dev : bp->dev;
1500 }
1501
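     /* Handle a TPA end completion.  Returns the completed skb ready for
      * delivery, NULL if the packet had to be dropped (buffers are recycled),
      * or ERR_PTR(-EBUSY) if not all completions are available yet.  Small
      * packets are copied; larger ones reuse the TPA data buffer and a fresh
      * buffer is allocated for the ring.  Aggregation pages, RSS hash, VLAN
      * tag, checksum status and optional GRO fixups are applied before
      * returning.
      */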
1502 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1503                                            struct bnxt_cp_ring_info *cpr,
1504                                            u32 *raw_cons,
1505                                            struct rx_tpa_end_cmp *tpa_end,
1506                                            struct rx_tpa_end_cmp_ext *tpa_end1,
1507                                            u8 *event)
1508 {
1509         struct bnxt_napi *bnapi = cpr->bnapi;
1510         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1511         u8 *data_ptr, agg_bufs;
1512         unsigned int len;
1513         struct bnxt_tpa_info *tpa_info;
1514         dma_addr_t mapping;
1515         struct sk_buff *skb;
1516         u16 idx = 0, agg_id;
1517         void *data;
1518         bool gro;
1519
1520         if (unlikely(bnapi->in_reset)) {
1521                 int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1522
1523                 if (rc < 0)
1524                         return ERR_PTR(-EBUSY);
1525                 return NULL;
1526         }
1527
1528         if (bp->flags & BNXT_FLAG_CHIP_P5) {
1529                 agg_id = TPA_END_AGG_ID_P5(tpa_end);
1530                 agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1531                 agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1532                 tpa_info = &rxr->rx_tpa[agg_id];
1533                 if (unlikely(agg_bufs != tpa_info->agg_count)) {
1534                         netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1535                                     agg_bufs, tpa_info->agg_count);
1536                         agg_bufs = tpa_info->agg_count;
1537                 }
1538                 tpa_info->agg_count = 0;
1539                 *event |= BNXT_AGG_EVENT;
1540                 bnxt_free_agg_idx(rxr, agg_id);
1541                 idx = agg_id;
1542                 gro = !!(bp->flags & BNXT_FLAG_GRO);
1543         } else {
1544                 agg_id = TPA_END_AGG_ID(tpa_end);
1545                 agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1546                 tpa_info = &rxr->rx_tpa[agg_id];
1547                 idx = RING_CMP(*raw_cons);
1548                 if (agg_bufs) {
1549                         if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1550                                 return ERR_PTR(-EBUSY);
1551
1552                         *event |= BNXT_AGG_EVENT;
1553                         idx = NEXT_CMP(idx);
1554                 }
1555                 gro = !!TPA_END_GRO(tpa_end);
1556         }
1557         data = tpa_info->data;
1558         data_ptr = tpa_info->data_ptr;
1559         prefetch(data_ptr);
1560         len = tpa_info->len;
1561         mapping = tpa_info->mapping;
1562
1563         if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1564                 bnxt_abort_tpa(cpr, idx, agg_bufs);
1565                 if (agg_bufs > MAX_SKB_FRAGS)
1566                         netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1567                                     agg_bufs, (int)MAX_SKB_FRAGS);
1568                 return NULL;
1569         }
1570
1571         if (len <= bp->rx_copy_thresh) {
1572                 skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1573                 if (!skb) {
1574                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1575                         return NULL;
1576                 }
1577         } else {
1578                 u8 *new_data;
1579                 dma_addr_t new_mapping;
1580
1581                 new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
1582                 if (!new_data) {
1583                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1584                         return NULL;
1585                 }
1586
1587                 tpa_info->data = new_data;
1588                 tpa_info->data_ptr = new_data + bp->rx_offset;
1589                 tpa_info->mapping = new_mapping;
1590
1591                 skb = build_skb(data, 0);
1592                 dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1593                                        bp->rx_buf_use_size, bp->rx_dir,
1594                                        DMA_ATTR_WEAK_ORDERING);
1595
1596                 if (!skb) {
1597                         kfree(data);
1598                         bnxt_abort_tpa(cpr, idx, agg_bufs);
1599                         return NULL;
1600                 }
1601                 skb_reserve(skb, bp->rx_offset);
1602                 skb_put(skb, len);
1603         }
1604
1605         if (agg_bufs) {
1606                 skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true);
1607                 if (!skb) {
1608                         /* Page reuse already handled by bnxt_rx_pages(). */
1609                         return NULL;
1610                 }
1611         }
1612
1613         skb->protocol =
1614                 eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1615
1616         if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1617                 skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1618
1619         if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1620             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1621                 u16 vlan_proto = tpa_info->metadata >>
1622                         RX_CMP_FLAGS2_METADATA_TPID_SFT;
1623                 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1624
1625                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1626         }
1627
1628         skb_checksum_none_assert(skb);
1629         if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1630                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1631                 skb->csum_level =
1632                         (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1633         }
1634
1635         if (gro)
1636                 skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1637
1638         return skb;
1639 }
1640
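     /* Stash a TPA aggregation completion (P5 chips) in the per-aggregation
      * array so the buffers can be attached when the TPA end completion
      * arrives.
      */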
1641 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1642                          struct rx_agg_cmp *rx_agg)
1643 {
1644         u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1645         struct bnxt_tpa_info *tpa_info;
1646
1647         agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1648         tpa_info = &rxr->rx_tpa[agg_id];
1649         BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1650         tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1651 }
1652
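     /* Deliver a completed skb: packets destined to a VF representor are
      * handed to the vf-rep path, everything else goes up through
      * napi_gro_receive().
      */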
1653 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1654                              struct sk_buff *skb)
1655 {
1656         if (skb->dev != bp->dev) {
1657                 /* this packet belongs to a vf-rep */
1658                 bnxt_vf_rep_rx(bp, skb);
1659                 return;
1660         }
1661         skb_record_rx_queue(skb, bnapi->index);
1662         napi_gro_receive(&bnapi->napi, skb);
1663 }
1664
1665 /* returns the following:
1666  * 1       - 1 packet successfully received
1667  * 0       - successful TPA_START, packet not completed yet
1668  * -EBUSY  - completion ring does not have all the agg buffers yet
1669  * -ENOMEM - packet aborted due to out of memory
1670  * -EIO    - packet aborted due to hw error indicated in BD
1671  */
1672 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1673                        u32 *raw_cons, u8 *event)
1674 {
1675         struct bnxt_napi *bnapi = cpr->bnapi;
1676         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1677         struct net_device *dev = bp->dev;
1678         struct rx_cmp *rxcmp;
1679         struct rx_cmp_ext *rxcmp1;
1680         u32 tmp_raw_cons = *raw_cons;
1681         u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1682         struct bnxt_sw_rx_bd *rx_buf;
1683         unsigned int len;
1684         u8 *data_ptr, agg_bufs, cmp_type;
1685         dma_addr_t dma_addr;
1686         struct sk_buff *skb;
1687         void *data;
1688         int rc = 0;
1689         u32 misc;
1690
1691         rxcmp = (struct rx_cmp *)
1692                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1693
1694         cmp_type = RX_CMP_TYPE(rxcmp);
1695
1696         if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1697                 bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1698                 goto next_rx_no_prod_no_len;
1699         }
1700
1701         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1702         cp_cons = RING_CMP(tmp_raw_cons);
1703         rxcmp1 = (struct rx_cmp_ext *)
1704                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1705
1706         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1707                 return -EBUSY;
1708
1709         prod = rxr->rx_prod;
1710
1711         if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1712                 bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1713                                (struct rx_tpa_start_cmp_ext *)rxcmp1);
1714
1715                 *event |= BNXT_RX_EVENT;
1716                 goto next_rx_no_prod_no_len;
1717
1718         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1719                 skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1720                                    (struct rx_tpa_end_cmp *)rxcmp,
1721                                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1722
1723                 if (IS_ERR(skb))
1724                         return -EBUSY;
1725
1726                 rc = -ENOMEM;
1727                 if (likely(skb)) {
1728                         bnxt_deliver_skb(bp, bnapi, skb);
1729                         rc = 1;
1730                 }
1731                 *event |= BNXT_RX_EVENT;
1732                 goto next_rx_no_prod_no_len;
1733         }
1734
1735         cons = rxcmp->rx_cmp_opaque;
1736         if (unlikely(cons != rxr->rx_next_cons)) {
1737                 int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
1738
1739                 netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1740                             cons, rxr->rx_next_cons);
1741                 bnxt_sched_reset(bp, rxr);
1742                 return rc1;
1743         }
1744         rx_buf = &rxr->rx_buf_ring[cons];
1745         data = rx_buf->data;
1746         data_ptr = rx_buf->data_ptr;
1747         prefetch(data_ptr);
1748
1749         misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1750         agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1751
1752         if (agg_bufs) {
1753                 if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1754                         return -EBUSY;
1755
1756                 cp_cons = NEXT_CMP(cp_cons);
1757                 *event |= BNXT_AGG_EVENT;
1758         }
1759         *event |= BNXT_RX_EVENT;
1760
1761         rx_buf->data = NULL;
1762         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1763                 u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1764
1765                 bnxt_reuse_rx_data(rxr, cons, data);
1766                 if (agg_bufs)
1767                         bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1768                                                false);
1769
1770                 rc = -EIO;
1771                 if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1772                         bnapi->cp_ring.rx_buf_errors++;
1773                         if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
1774                                 netdev_warn(bp->dev, "RX buffer error %x\n",
1775                                             rx_err);
1776                                 bnxt_sched_reset(bp, rxr);
1777                         }
1778                 }
1779                 goto next_rx_no_len;
1780         }
1781
1782         len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1783         dma_addr = rx_buf->mapping;
1784
1785         if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
1786                 rc = 1;
1787                 goto next_rx;
1788         }
1789
1790         if (len <= bp->rx_copy_thresh) {
1791                 skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1792                 bnxt_reuse_rx_data(rxr, cons, data);
1793                 if (!skb) {
1794                         if (agg_bufs)
1795                                 bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1796                                                        agg_bufs, false);
1797                         rc = -ENOMEM;
1798                         goto next_rx;
1799                 }
1800         } else {
1801                 u32 payload;
1802
1803                 if (rx_buf->data_ptr == data_ptr)
1804                         payload = misc & RX_CMP_PAYLOAD_OFFSET;
1805                 else
1806                         payload = 0;
1807                 skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1808                                       payload | len);
1809                 if (!skb) {
1810                         rc = -ENOMEM;
1811                         goto next_rx;
1812                 }
1813         }
1814
1815         if (agg_bufs) {
1816                 skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false);
1817                 if (!skb) {
1818                         rc = -ENOMEM;
1819                         goto next_rx;
1820                 }
1821         }
1822
1823         if (RX_CMP_HASH_VALID(rxcmp)) {
1824                 u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1825                 enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1826
1827                 /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1828                 if (hash_type != 1 && hash_type != 3)
1829                         type = PKT_HASH_TYPE_L3;
1830                 skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1831         }
1832
1833         cfa_code = RX_CMP_CFA_CODE(rxcmp1);
1834         skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
1835
1836         if ((rxcmp1->rx_cmp_flags2 &
1837              cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1838             (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1839                 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1840                 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1841                 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1842
1843                 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1844         }
1845
1846         skb_checksum_none_assert(skb);
1847         if (RX_CMP_L4_CS_OK(rxcmp1)) {
1848                 if (dev->features & NETIF_F_RXCSUM) {
1849                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1850                         skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1851                 }
1852         } else {
1853                 if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
1854                         if (dev->features & NETIF_F_RXCSUM)
1855                                 bnapi->cp_ring.rx_l4_csum_errors++;
1856                 }
1857         }
1858
1859         bnxt_deliver_skb(bp, bnapi, skb);
1860         rc = 1;
1861
1862 next_rx:
1863         cpr->rx_packets += 1;
1864         cpr->rx_bytes += len;
1865
1866 next_rx_no_len:
1867         rxr->rx_prod = NEXT_RX(prod);
1868         rxr->rx_next_cons = NEXT_RX(cons);
1869
1870 next_rx_no_prod_no_len:
1871         *raw_cons = tmp_raw_cons;
1872
1873         return rc;
1874 }
1875
1876 /* In netpoll mode, if we are using a combined completion ring, we need to
1877  * discard the rx packets and recycle the buffers.
1878  */
1879 static int bnxt_force_rx_discard(struct bnxt *bp,
1880                                  struct bnxt_cp_ring_info *cpr,
1881                                  u32 *raw_cons, u8 *event)
1882 {
1883         u32 tmp_raw_cons = *raw_cons;
1884         struct rx_cmp_ext *rxcmp1;
1885         struct rx_cmp *rxcmp;
1886         u16 cp_cons;
1887         u8 cmp_type;
1888
1889         cp_cons = RING_CMP(tmp_raw_cons);
1890         rxcmp = (struct rx_cmp *)
1891                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1892
1893         tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1894         cp_cons = RING_CMP(tmp_raw_cons);
1895         rxcmp1 = (struct rx_cmp_ext *)
1896                         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1897
1898         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1899                 return -EBUSY;
1900
1901         cmp_type = RX_CMP_TYPE(rxcmp);
1902         if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1903                 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
1904                         cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
1905         } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1906                 struct rx_tpa_end_cmp_ext *tpa_end1;
1907
1908                 tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
1909                 tpa_end1->rx_tpa_end_cmp_errors_v2 |=
1910                         cpu_to_le32(RX_TPA_END_CMP_ERRORS);
1911         }
1912         return bnxt_rx_pkt(bp, cpr, raw_cons, event);
1913 }
1914
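     /* Read a firmware health register.  The register descriptor at reg_idx
      * encodes both the access type (PCI config space, GRC window, BAR0 or
      * BAR1) and the offset within that space.
      */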
1915 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
1916 {
1917         struct bnxt_fw_health *fw_health = bp->fw_health;
1918         u32 reg = fw_health->regs[reg_idx];
1919         u32 reg_type, reg_off, val = 0;
1920
1921         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
1922         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
1923         switch (reg_type) {
1924         case BNXT_FW_HEALTH_REG_TYPE_CFG:
1925                 pci_read_config_dword(bp->pdev, reg_off, &val);
1926                 break;
1927         case BNXT_FW_HEALTH_REG_TYPE_GRC:
1928                 reg_off = fw_health->mapped_regs[reg_idx];
1929                 /* fall through */
1930         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
1931                 val = readl(bp->bar0 + reg_off);
1932                 break;
1933         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
1934                 val = readl(bp->bar1 + reg_off);
1935                 break;
1936         }
1937         if (reg_idx == BNXT_FW_RESET_INPROG_REG)
1938                 val &= fw_health->fw_reset_inprog_reg_mask;
1939         return val;
1940 }
1941
1942 #define BNXT_GET_EVENT_PORT(data)       \
1943         ((data) &                       \
1944          ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
1945
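     /* Decode a firmware asynchronous event completion and set the
      * corresponding sp_event bit(s) so the slow-path workqueue can act on it
      * (link changes, driver unload, reset notifications, error recovery,
      * etc.).
      */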
1946 static int bnxt_async_event_process(struct bnxt *bp,
1947                                     struct hwrm_async_event_cmpl *cmpl)
1948 {
1949         u16 event_id = le16_to_cpu(cmpl->event_id);
1950
1951         /* TODO CHIMP_FW: Define event id's for link change, error etc */
1952         switch (event_id) {
1953         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
1954                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1955                 struct bnxt_link_info *link_info = &bp->link_info;
1956
1957                 if (BNXT_VF(bp))
1958                         goto async_event_process_exit;
1959
1960                 /* print unsupported speed warning in forced speed mode only */
1961                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
1962                     (data1 & 0x20000)) {
1963                         u16 fw_speed = link_info->force_link_speed;
1964                         u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
1965
1966                         if (speed != SPEED_UNKNOWN)
1967                                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
1968                                             speed);
1969                 }
1970                 set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1971         }
1972         /* fall through */
1973         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
1974         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
1975                 set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
1976                 /* fall through */
1977         case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1978                 set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1979                 break;
1980         case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
1981                 set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
1982                 break;
1983         case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
1984                 u32 data1 = le32_to_cpu(cmpl->event_data1);
1985                 u16 port_id = BNXT_GET_EVENT_PORT(data1);
1986
1987                 if (BNXT_VF(bp))
1988                         break;
1989
1990                 if (bp->pf.port_id != port_id)
1991                         break;
1992
1993                 set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
1994                 break;
1995         }
1996         case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
1997                 if (BNXT_PF(bp))
1998                         goto async_event_process_exit;
1999                 set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2000                 break;
2001         case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2002                 u32 data1 = le32_to_cpu(cmpl->event_data1);
2003
2004                 if (!bp->fw_health)
2005                         goto async_event_process_exit;
2006
2007                 bp->fw_reset_timestamp = jiffies;
2008                 bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2009                 if (!bp->fw_reset_min_dsecs)
2010                         bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2011                 bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2012                 if (!bp->fw_reset_max_dsecs)
2013                         bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2014                 if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2015                         netdev_warn(bp->dev, "Firmware fatal reset event received\n");
2016                         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2017                 } else {
2018                         netdev_warn(bp->dev, "Firmware non-fatal reset event received, max wait time %d msec\n",
2019                                     bp->fw_reset_max_dsecs * 100);
2020                 }
2021                 set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2022                 break;
2023         }
2024         case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2025                 struct bnxt_fw_health *fw_health = bp->fw_health;
2026                 u32 data1 = le32_to_cpu(cmpl->event_data1);
2027
2028                 if (!fw_health)
2029                         goto async_event_process_exit;
2030
2031                 fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
2032                 fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2033                 if (!fw_health->enabled)
2034                         break;
2035
2036                 if (netif_msg_drv(bp))
2037                         netdev_info(bp->dev, "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
2038                                     fw_health->enabled, fw_health->master,
2039                                     bnxt_fw_health_readl(bp,
2040                                                          BNXT_FW_RESET_CNT_REG),
2041                                     bnxt_fw_health_readl(bp,
2042                                                          BNXT_FW_HEALTH_REG));
2043                 fw_health->tmr_multiplier =
2044                         DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2045                                      bp->current_interval * 10);
2046                 fw_health->tmr_counter = fw_health->tmr_multiplier;
2047                 fw_health->last_fw_heartbeat =
2048                         bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2049                 fw_health->last_fw_reset_cnt =
2050                         bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2051                 goto async_event_process_exit;
2052         }
2053         default:
2054                 goto async_event_process_exit;
2055         }
2056         bnxt_queue_sp_work(bp);
2057 async_event_process_exit:
2058         bnxt_ulp_async_events(bp, cmpl);
2059         return 0;
2060 }
2061
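     /* Handle HWRM-related completions found on the completion ring: command
      * done notifications, requests forwarded from VFs, and async events.
      */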
2062 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2063 {
2064         u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2065         struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2066         struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2067                                 (struct hwrm_fwd_req_cmpl *)txcmp;
2068
2069         switch (cmpl_type) {
2070         case CMPL_BASE_TYPE_HWRM_DONE:
2071                 seq_id = le16_to_cpu(h_cmpl->sequence_id);
2072                 if (seq_id == bp->hwrm_intr_seq_id)
2073                         bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id;
2074                 else
2075                         netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
2076                 break;
2077
2078         case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2079                 vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2080
2081                 if ((vf_id < bp->pf.first_vf_id) ||
2082                     (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2083                         netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2084                                    vf_id);
2085                         return -EINVAL;
2086                 }
2087
2088                 set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2089                 set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2090                 bnxt_queue_sp_work(bp);
2091                 break;
2092
2093         case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2094                 bnxt_async_event_process(bp,
2095                                          (struct hwrm_async_event_cmpl *)txcmp);
2096
2097         default:
2098                 break;
2099         }
2100
2101         return 0;
2102 }
2103
2104 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2105 {
2106         struct bnxt_napi *bnapi = dev_instance;
2107         struct bnxt *bp = bnapi->bp;
2108         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2109         u32 cons = RING_CMP(cpr->cp_raw_cons);
2110
2111         cpr->event_ctr++;
2112         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2113         napi_schedule(&bnapi->napi);
2114         return IRQ_HANDLED;
2115 }
2116
2117 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2118 {
2119         u32 raw_cons = cpr->cp_raw_cons;
2120         u16 cons = RING_CMP(raw_cons);
2121         struct tx_cmp *txcmp;
2122
2123         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2124
2125         return TX_CMP_VALID(txcmp, raw_cons);
2126 }
2127
2128 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2129 {
2130         struct bnxt_napi *bnapi = dev_instance;
2131         struct bnxt *bp = bnapi->bp;
2132         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2133         u32 cons = RING_CMP(cpr->cp_raw_cons);
2134         u32 int_status;
2135
2136         prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2137
2138         if (!bnxt_has_work(bp, cpr)) {
2139                 int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2140                 /* return if erroneous interrupt */
2141                 if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2142                         return IRQ_NONE;
2143         }
2144
2145         /* disable ring IRQ */
2146         BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2147
2148         /* Return here if interrupt is shared and is disabled. */
2149         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2150                 return IRQ_HANDLED;
2151
2152         napi_schedule(&bnapi->napi);
2153         return IRQ_HANDLED;
2154 }
2155
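     /* Core completion-ring poll loop.  Processes TX, RX and HWRM completions
      * up to the given budget, accumulating TX work and doorbell events in
      * bnapi so that __bnxt_poll_work_done() can reclaim TX buffers and ring
      * the RX/agg doorbells afterwards.  Returns the number of RX packets
      * processed.
      */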
2156 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2157                             int budget)
2158 {
2159         struct bnxt_napi *bnapi = cpr->bnapi;
2160         u32 raw_cons = cpr->cp_raw_cons;
2161         u32 cons;
2162         int tx_pkts = 0;
2163         int rx_pkts = 0;
2164         u8 event = 0;
2165         struct tx_cmp *txcmp;
2166
2167         cpr->has_more_work = 0;
2168         while (1) {
2169                 int rc;
2170
2171                 cons = RING_CMP(raw_cons);
2172                 txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2173
2174                 if (!TX_CMP_VALID(txcmp, raw_cons))
2175                         break;
2176
2177                 /* The validity test of the entry must be done first,
2178                  * before reading any further.
2179                  */
2180                 dma_rmb();
2181                 cpr->had_work_done = 1;
2182                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2183                         tx_pkts++;
2184                         /* return full budget so NAPI will complete. */
2185                         if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
2186                                 rx_pkts = budget;
2187                                 raw_cons = NEXT_RAW_CMP(raw_cons);
2188                                 if (budget)
2189                                         cpr->has_more_work = 1;
2190                                 break;
2191                         }
2192                 } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2193                         if (likely(budget))
2194                                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2195                         else
2196                                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2197                                                            &event);
2198                         if (likely(rc >= 0))
2199                                 rx_pkts += rc;
2200                         /* Increment rx_pkts when rc is -ENOMEM to count towards
2201                          * the NAPI budget.  Otherwise, we may potentially loop
2202                          * here forever if we consistently cannot allocate
2203                          * buffers.
2204                          */
2205                         else if (rc == -ENOMEM && budget)
2206                                 rx_pkts++;
2207                         else if (rc == -EBUSY)  /* partial completion */
2208                                 break;
2209                 } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2210                                      CMPL_BASE_TYPE_HWRM_DONE) ||
2211                                     (TX_CMP_TYPE(txcmp) ==
2212                                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2213                                     (TX_CMP_TYPE(txcmp) ==
2214                                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2215                         bnxt_hwrm_handler(bp, txcmp);
2216                 }
2217                 raw_cons = NEXT_RAW_CMP(raw_cons);
2218
2219                 if (rx_pkts && rx_pkts == budget) {
2220                         cpr->has_more_work = 1;
2221                         break;
2222                 }
2223         }
2224
2225         if (event & BNXT_REDIRECT_EVENT)
2226                 xdp_do_flush_map();
2227
2228         if (event & BNXT_TX_EVENT) {
2229                 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2230                 u16 prod = txr->tx_prod;
2231
2232                 /* Sync BD data before updating doorbell */
2233                 wmb();
2234
2235                 bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2236         }
2237
2238         cpr->cp_raw_cons = raw_cons;
2239         bnapi->tx_pkts += tx_pkts;
2240         bnapi->events |= event;
2241         return rx_pkts;
2242 }
2243
2244 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2245 {
2246         if (bnapi->tx_pkts) {
2247                 bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2248                 bnapi->tx_pkts = 0;
2249         }
2250
2251         if (bnapi->events & BNXT_RX_EVENT) {
2252                 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2253
2254                 if (bnapi->events & BNXT_AGG_EVENT)
2255                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2256                 bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2257         }
2258         bnapi->events = 0;
2259 }
2260
2261 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2262                           int budget)
2263 {
2264         struct bnxt_napi *bnapi = cpr->bnapi;
2265         int rx_pkts;
2266
2267         rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2268
2269         /* ACK completion ring before freeing tx ring and producing new
2270          * buffers in rx/agg rings to prevent overflowing the completion
2271          * ring.
2272          */
2273         bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2274
2275         __bnxt_poll_work_done(bp, bnapi);
2276         return rx_pkts;
2277 }
2278
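     /* NAPI poll handler for the special Nitro A0 completion ring.  RX
      * completions are forced to an error status so bnxt_rx_pkt() recycles
      * the buffers instead of delivering the packets.
      */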
2279 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2280 {
2281         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2282         struct bnxt *bp = bnapi->bp;
2283         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2284         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2285         struct tx_cmp *txcmp;
2286         struct rx_cmp_ext *rxcmp1;
2287         u32 cp_cons, tmp_raw_cons;
2288         u32 raw_cons = cpr->cp_raw_cons;
2289         u32 rx_pkts = 0;
2290         u8 event = 0;
2291
2292         while (1) {
2293                 int rc;
2294
2295                 cp_cons = RING_CMP(raw_cons);
2296                 txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2297
2298                 if (!TX_CMP_VALID(txcmp, raw_cons))
2299                         break;
2300
2301                 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2302                         tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2303                         cp_cons = RING_CMP(tmp_raw_cons);
2304                         rxcmp1 = (struct rx_cmp_ext *)
2305                           &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2306
2307                         if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2308                                 break;
2309
2310                         /* force an error to recycle the buffer */
2311                         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2312                                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2313
2314                         rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2315                         if (likely(rc == -EIO) && budget)
2316                                 rx_pkts++;
2317                         else if (rc == -EBUSY)  /* partial completion */
2318                                 break;
2319                 } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2320                                     CMPL_BASE_TYPE_HWRM_DONE)) {
2321                         bnxt_hwrm_handler(bp, txcmp);
2322                 } else {
2323                         netdev_err(bp->dev,
2324                                    "Invalid completion received on special ring\n");
2325                 }
2326                 raw_cons = NEXT_RAW_CMP(raw_cons);
2327
2328                 if (rx_pkts == budget)
2329                         break;
2330         }
2331
2332         cpr->cp_raw_cons = raw_cons;
2333         BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2334         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2335
2336         if (event & BNXT_AGG_EVENT)
2337                 bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2338
2339         if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2340                 napi_complete_done(napi, rx_pkts);
2341                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2342         }
2343         return rx_pkts;
2344 }
2345
2346 static int bnxt_poll(struct napi_struct *napi, int budget)
2347 {
2348         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2349         struct bnxt *bp = bnapi->bp;
2350         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2351         int work_done = 0;
2352
2353         while (1) {
2354                 work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2355
2356                 if (work_done >= budget) {
2357                         if (!budget)
2358                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2359                         break;
2360                 }
2361
2362                 if (!bnxt_has_work(bp, cpr)) {
2363                         if (napi_complete_done(napi, work_done))
2364                                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2365                         break;
2366                 }
2367         }
2368         if (bp->flags & BNXT_FLAG_DIM) {
2369                 struct dim_sample dim_sample = {};
2370
2371                 dim_update_sample(cpr->event_ctr,
2372                                   cpr->rx_packets,
2373                                   cpr->rx_bytes,
2374                                   &dim_sample);
2375                 net_dim(&cpr->dim, dim_sample);
2376         }
2377         return work_done;
2378 }
2379
2380 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2381 {
2382         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2383         int i, work_done = 0;
2384
2385         for (i = 0; i < 2; i++) {
2386                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2387
2388                 if (cpr2) {
2389                         work_done += __bnxt_poll_work(bp, cpr2,
2390                                                       budget - work_done);
2391                         cpr->has_more_work |= cpr2->has_more_work;
2392                 }
2393         }
2394         return work_done;
2395 }
2396
2397 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2398                                  u64 dbr_type, bool all)
2399 {
2400         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2401         int i;
2402
2403         for (i = 0; i < 2; i++) {
2404                 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2405                 struct bnxt_db_info *db;
2406
2407                 if (cpr2 && (all || cpr2->had_work_done)) {
2408                         db = &cpr2->cp_db;
2409                         writeq(db->db_key64 | dbr_type |
2410                                RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2411                         cpr2->had_work_done = 0;
2412                 }
2413         }
2414         __bnxt_poll_work_done(bp, bnapi);
2415 }
2416
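     /* NAPI poll handler for P5 chips.  The NAPI instance polls a
      * notification queue (NQ); each CQ notification points at one of up to
      * two completion rings in cp_ring_arr[], which are then polled by
      * __bnxt_poll_work().
      */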
2417 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2418 {
2419         struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2420         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2421         u32 raw_cons = cpr->cp_raw_cons;
2422         struct bnxt *bp = bnapi->bp;
2423         struct nqe_cn *nqcmp;
2424         int work_done = 0;
2425         u32 cons;
2426
2427         if (cpr->has_more_work) {
2428                 cpr->has_more_work = 0;
2429                 work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2430                 if (cpr->has_more_work) {
2431                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
2432                         return work_done;
2433                 }
2434                 __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
2435                 if (napi_complete_done(napi, work_done))
2436                         BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
2437                 return work_done;
2438         }
2439         while (1) {
2440                 cons = RING_CMP(raw_cons);
2441                 nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2442
2443                 if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2444                         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
2445                                              false);
2446                         cpr->cp_raw_cons = raw_cons;
2447                         if (napi_complete_done(napi, work_done))
2448                                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2449                                                   cpr->cp_raw_cons);
2450                         return work_done;
2451                 }
2452
2453                 /* The validity test of the entry must be done first,
2454                  * before reading any further.
2455                  */
2456                 dma_rmb();
2457
2458                 if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2459                         u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2460                         struct bnxt_cp_ring_info *cpr2;
2461
2462                         cpr2 = cpr->cp_ring_arr[idx];
2463                         work_done += __bnxt_poll_work(bp, cpr2,
2464                                                       budget - work_done);
2465                         cpr->has_more_work = cpr2->has_more_work;
2466                 } else {
2467                         bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2468                 }
2469                 raw_cons = NEXT_RAW_CMP(raw_cons);
2470                 if (cpr->has_more_work)
2471                         break;
2472         }
2473         __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
2474         cpr->cp_raw_cons = raw_cons;
2475         return work_done;
2476 }
2477
2478 static void bnxt_free_tx_skbs(struct bnxt *bp)
2479 {
2480         int i, max_idx;
2481         struct pci_dev *pdev = bp->pdev;
2482
2483         if (!bp->tx_ring)
2484                 return;
2485
2486         max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2487         for (i = 0; i < bp->tx_nr_rings; i++) {
2488                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2489                 int j;
2490
2491                 for (j = 0; j < max_idx;) {
2492                         struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2493                         struct sk_buff *skb;
2494                         int k, last;
2495
2496                         if (i < bp->tx_nr_rings_xdp &&
2497                             tx_buf->action == XDP_REDIRECT) {
2498                                 dma_unmap_single(&pdev->dev,
2499                                         dma_unmap_addr(tx_buf, mapping),
2500                                         dma_unmap_len(tx_buf, len),
2501                                         PCI_DMA_TODEVICE);
2502                                 xdp_return_frame(tx_buf->xdpf);
2503                                 tx_buf->action = 0;
2504                                 tx_buf->xdpf = NULL;
2505                                 j++;
2506                                 continue;
2507                         }
2508
2509                         skb = tx_buf->skb;
2510                         if (!skb) {
2511                                 j++;
2512                                 continue;
2513                         }
2514
2515                         tx_buf->skb = NULL;
2516
2517                         if (tx_buf->is_push) {
2518                                 dev_kfree_skb(skb);
2519                                 j += 2;
2520                                 continue;
2521                         }
2522
2523                         dma_unmap_single(&pdev->dev,
2524                                          dma_unmap_addr(tx_buf, mapping),
2525                                          skb_headlen(skb),
2526                                          PCI_DMA_TODEVICE);
2527
2528                         last = tx_buf->nr_frags;
2529                         j += 2;
2530                         for (k = 0; k < last; k++, j++) {
2531                                 int ring_idx = j & bp->tx_ring_mask;
2532                                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2533
2534                                 tx_buf = &txr->tx_buf_ring[ring_idx];
2535                                 dma_unmap_page(
2536                                         &pdev->dev,
2537                                         dma_unmap_addr(tx_buf, mapping),
2538                                         skb_frag_size(frag), PCI_DMA_TODEVICE);
2539                         }
2540                         dev_kfree_skb(skb);
2541                 }
2542                 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2543         }
2544 }
2545
2546 static void bnxt_free_rx_skbs(struct bnxt *bp)
2547 {
2548         int i, max_idx, max_agg_idx;
2549         struct pci_dev *pdev = bp->pdev;
2550
2551         if (!bp->rx_ring)
2552                 return;
2553
2554         max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2555         max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2556         for (i = 0; i < bp->rx_nr_rings; i++) {
2557                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2558                 struct bnxt_tpa_idx_map *map;
2559                 int j;
2560
2561                 if (rxr->rx_tpa) {
2562                         for (j = 0; j < bp->max_tpa; j++) {
2563                                 struct bnxt_tpa_info *tpa_info =
2564                                                         &rxr->rx_tpa[j];
2565                                 u8 *data = tpa_info->data;
2566
2567                                 if (!data)
2568                                         continue;
2569
2570                                 dma_unmap_single_attrs(&pdev->dev,
2571                                                        tpa_info->mapping,
2572                                                        bp->rx_buf_use_size,
2573                                                        bp->rx_dir,
2574                                                        DMA_ATTR_WEAK_ORDERING);
2575
2576                                 tpa_info->data = NULL;
2577
2578                                 kfree(data);
2579                         }
2580                 }
2581
2582                 for (j = 0; j < max_idx; j++) {
2583                         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
2584                         dma_addr_t mapping = rx_buf->mapping;
2585                         void *data = rx_buf->data;
2586
2587                         if (!data)
2588                                 continue;
2589
2590                         rx_buf->data = NULL;
2591
2592                         if (BNXT_RX_PAGE_MODE(bp)) {
2593                                 mapping -= bp->rx_dma_offset;
2594                                 dma_unmap_page_attrs(&pdev->dev, mapping,
2595                                                      PAGE_SIZE, bp->rx_dir,
2596                                                      DMA_ATTR_WEAK_ORDERING);
2597                                 page_pool_recycle_direct(rxr->page_pool, data);
2598                         } else {
2599                                 dma_unmap_single_attrs(&pdev->dev, mapping,
2600                                                        bp->rx_buf_use_size,
2601                                                        bp->rx_dir,
2602                                                        DMA_ATTR_WEAK_ORDERING);
2603                                 kfree(data);
2604                         }
2605                 }
2606
2607                 for (j = 0; j < max_agg_idx; j++) {
2608                         struct bnxt_sw_rx_agg_bd *rx_agg_buf =
2609                                 &rxr->rx_agg_ring[j];
2610                         struct page *page = rx_agg_buf->page;
2611
2612                         if (!page)
2613                                 continue;
2614
2615                         dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2616                                              BNXT_RX_PAGE_SIZE,
2617                                              PCI_DMA_FROMDEVICE,
2618                                              DMA_ATTR_WEAK_ORDERING);
2619
2620                         rx_agg_buf->page = NULL;
2621                         __clear_bit(j, rxr->rx_agg_bmap);
2622
2623                         __free_page(page);
2624                 }
2625                 if (rxr->rx_page) {
2626                         __free_page(rxr->rx_page);
2627                         rxr->rx_page = NULL;
2628                 }
2629                 map = rxr->rx_tpa_idx_map;
2630                 if (map)
2631                         memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
2632         }
2633 }
2634
2635 static void bnxt_free_skbs(struct bnxt *bp)
2636 {
2637         bnxt_free_tx_skbs(bp);
2638         bnxt_free_rx_skbs(bp);
2639 }
2640
2641 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2642 {
2643         struct pci_dev *pdev = bp->pdev;
2644         int i;
2645
2646         for (i = 0; i < rmem->nr_pages; i++) {
2647                 if (!rmem->pg_arr[i])
2648                         continue;
2649
2650                 dma_free_coherent(&pdev->dev, rmem->page_size,
2651                                   rmem->pg_arr[i], rmem->dma_arr[i]);
2652
2653                 rmem->pg_arr[i] = NULL;
2654         }
2655         if (rmem->pg_tbl) {
2656                 size_t pg_tbl_size = rmem->nr_pages * 8;
2657
2658                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2659                         pg_tbl_size = rmem->page_size;
2660                 dma_free_coherent(&pdev->dev, pg_tbl_size,
2661                                   rmem->pg_tbl, rmem->pg_tbl_map);
2662                 rmem->pg_tbl = NULL;
2663         }
2664         if (rmem->vmem_size && *rmem->vmem) {
2665                 vfree(*rmem->vmem);
2666                 *rmem->vmem = NULL;
2667         }
2668 }
2669
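     /* Allocate the coherent DMA pages for a ring (and, for multi-page or
      * deep rings, the page table that points at them), optionally
      * initializing each page and setting the PTE valid/last bits required by
      * the chip.  The associated software vmem area is also allocated when
      * vmem_size is non-zero.
      */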
2670 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
2671 {
2672         struct pci_dev *pdev = bp->pdev;
2673         u64 valid_bit = 0;
2674         int i;
2675
2676         if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
2677                 valid_bit = PTU_PTE_VALID;
2678         if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
2679                 size_t pg_tbl_size = rmem->nr_pages * 8;
2680
2681                 if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
2682                         pg_tbl_size = rmem->page_size;
2683                 rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
2684                                                   &rmem->pg_tbl_map,
2685                                                   GFP_KERNEL);
2686                 if (!rmem->pg_tbl)
2687                         return -ENOMEM;
2688         }
2689
2690         for (i = 0; i < rmem->nr_pages; i++) {
2691                 u64 extra_bits = valid_bit;
2692
2693                 rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
2694                                                      rmem->page_size,
2695                                                      &rmem->dma_arr[i],
2696                                                      GFP_KERNEL);
2697                 if (!rmem->pg_arr[i])
2698                         return -ENOMEM;
2699
2700                 if (rmem->init_val)
2701                         memset(rmem->pg_arr[i], rmem->init_val,
2702                                rmem->page_size);
2703                 if (rmem->nr_pages > 1 || rmem->depth > 0) {
2704                         if (i == rmem->nr_pages - 2 &&
2705                             (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2706                                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
2707                         else if (i == rmem->nr_pages - 1 &&
2708                                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
2709                                 extra_bits |= PTU_PTE_LAST;
2710                         rmem->pg_tbl[i] =
2711                                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
2712                 }
2713         }
2714
2715         if (rmem->vmem_size) {
2716                 *rmem->vmem = vzalloc(rmem->vmem_size);
2717                 if (!(*rmem->vmem))
2718                         return -ENOMEM;
2719         }
2720         return 0;
2721 }
2722
2723 static void bnxt_free_tpa_info(struct bnxt *bp)
2724 {
2725         int i;
2726
2727         for (i = 0; i < bp->rx_nr_rings; i++) {
2728                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2729
2730                 kfree(rxr->rx_tpa_idx_map);
2731                 rxr->rx_tpa_idx_map = NULL;
2732                 if (rxr->rx_tpa) {
2733                         kfree(rxr->rx_tpa[0].agg_arr);
2734                         rxr->rx_tpa[0].agg_arr = NULL;
2735                 }
2736                 kfree(rxr->rx_tpa);
2737                 rxr->rx_tpa = NULL;
2738         }
2739 }
2740
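/* Allocate per-ring TPA (LRO/GRO_HW) state.  On P5 chips a single
 * aggregation completion array is carved up among all TPA slots of a
 * ring, and an agg ID map is allocated as well.
 */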
2741 static int bnxt_alloc_tpa_info(struct bnxt *bp)
2742 {
2743         int i, j, total_aggs = 0;
2744
2745         bp->max_tpa = MAX_TPA;
2746         if (bp->flags & BNXT_FLAG_CHIP_P5) {
2747                 if (!bp->max_tpa_v2)
2748                         return 0;
2749                 bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
2750                 total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
2751         }
2752
2753         for (i = 0; i < bp->rx_nr_rings; i++) {
2754                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2755                 struct rx_agg_cmp *agg;
2756
2757                 rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
2758                                       GFP_KERNEL);
2759                 if (!rxr->rx_tpa)
2760                         return -ENOMEM;
2761
2762                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
2763                         continue;
2764                 agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
2765                 rxr->rx_tpa[0].agg_arr = agg;
2766                 if (!agg)
2767                         return -ENOMEM;
2768                 for (j = 1; j < bp->max_tpa; j++)
2769                         rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
2770                 rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
2771                                               GFP_KERNEL);
2772                 if (!rxr->rx_tpa_idx_map)
2773                         return -ENOMEM;
2774         }
2775         return 0;
2776 }
2777
2778 static void bnxt_free_rx_rings(struct bnxt *bp)
2779 {
2780         int i;
2781
2782         if (!bp->rx_ring)
2783                 return;
2784
2785         bnxt_free_tpa_info(bp);
2786         for (i = 0; i < bp->rx_nr_rings; i++) {
2787                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2788                 struct bnxt_ring_struct *ring;
2789
2790                 if (rxr->xdp_prog)
2791                         bpf_prog_put(rxr->xdp_prog);
2792
2793                 if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
2794                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2795
2796                 page_pool_destroy(rxr->page_pool);
2797                 rxr->page_pool = NULL;
2798
2799                 kfree(rxr->rx_agg_bmap);
2800                 rxr->rx_agg_bmap = NULL;
2801
2802                 ring = &rxr->rx_ring_struct;
2803                 bnxt_free_ring(bp, &ring->ring_mem);
2804
2805                 ring = &rxr->rx_agg_ring_struct;
2806                 bnxt_free_ring(bp, &ring->ring_mem);
2807         }
2808 }
2809
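/* Create a page_pool for one RX ring, sized to the RX ring and bound
 * to the device's NUMA node so recycled pages stay local.
 */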
2810 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
2811                                    struct bnxt_rx_ring_info *rxr)
2812 {
2813         struct page_pool_params pp = { 0 };
2814
2815         pp.pool_size = bp->rx_ring_size;
2816         pp.nid = dev_to_node(&bp->pdev->dev);
2817         pp.dev = &bp->pdev->dev;
2818         pp.dma_dir = DMA_BIDIRECTIONAL;
2819
2820         rxr->page_pool = page_pool_create(&pp);
2821         if (IS_ERR(rxr->page_pool)) {
2822                 int err = PTR_ERR(rxr->page_pool);
2823
2824                 rxr->page_pool = NULL;
2825                 return err;
2826         }
2827         return 0;
2828 }
2829
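/* For each RX ring: create its page_pool, register the ring with the
 * XDP RX-queue infrastructure, allocate the RX descriptor ring and,
 * when aggregation rings are enabled, the agg ring and its bitmap.
 * TPA state is allocated last if TPA is enabled.
 */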
2830 static int bnxt_alloc_rx_rings(struct bnxt *bp)
2831 {
2832         int i, rc = 0, agg_rings = 0;
2833
2834         if (!bp->rx_ring)
2835                 return -ENOMEM;
2836
2837         if (bp->flags & BNXT_FLAG_AGG_RINGS)
2838                 agg_rings = 1;
2839
2840         for (i = 0; i < bp->rx_nr_rings; i++) {
2841                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
2842                 struct bnxt_ring_struct *ring;
2843
2844                 ring = &rxr->rx_ring_struct;
2845
2846                 rc = bnxt_alloc_rx_page_pool(bp, rxr);
2847                 if (rc)
2848                         return rc;
2849
2850                 rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
2851                 if (rc < 0)
2852                         return rc;
2853
2854                 rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
2855                                                 MEM_TYPE_PAGE_POOL,
2856                                                 rxr->page_pool);
2857                 if (rc) {
2858                         xdp_rxq_info_unreg(&rxr->xdp_rxq);
2859                         return rc;
2860                 }
2861
2862                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2863                 if (rc)
2864                         return rc;
2865
2866                 ring->grp_idx = i;
2867                 if (agg_rings) {
2868                         u16 mem_size;
2869
2870                         ring = &rxr->rx_agg_ring_struct;
2871                         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2872                         if (rc)
2873                                 return rc;
2874
2875                         ring->grp_idx = i;
2876                         rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
2877                         mem_size = rxr->rx_agg_bmap_size / 8;
2878                         rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
2879                         if (!rxr->rx_agg_bmap)
2880                                 return -ENOMEM;
2881                 }
2882         }
2883         if (bp->flags & BNXT_FLAG_TPA)
2884                 rc = bnxt_alloc_tpa_info(bp);
2885         return rc;
2886 }
2887
2888 static void bnxt_free_tx_rings(struct bnxt *bp)
2889 {
2890         int i;
2891         struct pci_dev *pdev = bp->pdev;
2892
2893         if (!bp->tx_ring)
2894                 return;
2895
2896         for (i = 0; i < bp->tx_nr_rings; i++) {
2897                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2898                 struct bnxt_ring_struct *ring;
2899
2900                 if (txr->tx_push) {
2901                         dma_free_coherent(&pdev->dev, bp->tx_push_size,
2902                                           txr->tx_push, txr->tx_push_mapping);
2903                         txr->tx_push = NULL;
2904                 }
2905
2906                 ring = &txr->tx_ring_struct;
2907
2908                 bnxt_free_ring(bp, &ring->ring_mem);
2909         }
2910 }
2911
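/* Allocate the TX descriptor rings and, when TX push is enabled, a
 * small DMA-coherent buffer per ring that backs the push doorbell
 * write.  Each ring is also assigned a hardware queue ID based on its
 * traffic class.
 */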
2912 static int bnxt_alloc_tx_rings(struct bnxt *bp)
2913 {
2914         int i, j, rc;
2915         struct pci_dev *pdev = bp->pdev;
2916
2917         bp->tx_push_size = 0;
2918         if (bp->tx_push_thresh) {
2919                 int push_size;
2920
2921                 push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
2922                                         bp->tx_push_thresh);
2923
2924                 if (push_size > 256) {
2925                         push_size = 0;
2926                         bp->tx_push_thresh = 0;
2927                 }
2928
2929                 bp->tx_push_size = push_size;
2930         }
2931
2932         for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
2933                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2934                 struct bnxt_ring_struct *ring;
2935                 u8 qidx;
2936
2937                 ring = &txr->tx_ring_struct;
2938
2939                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
2940                 if (rc)
2941                         return rc;
2942
2943                 ring->grp_idx = txr->bnapi->index;
2944                 if (bp->tx_push_size) {
2945                         dma_addr_t mapping;
2946
2947                         /* One pre-allocated DMA buffer to back up
2948                          * the TX push operation
2949                          */
2950                         txr->tx_push = dma_alloc_coherent(&pdev->dev,
2951                                                 bp->tx_push_size,
2952                                                 &txr->tx_push_mapping,
2953                                                 GFP_KERNEL);
2954
2955                         if (!txr->tx_push)
2956                                 return -ENOMEM;
2957
2958                         mapping = txr->tx_push_mapping +
2959                                 sizeof(struct tx_push_bd);
2960                         txr->data_mapping = cpu_to_le64(mapping);
2961                 }
2962                 qidx = bp->tc_to_qidx[j];
2963                 ring->queue_id = bp->q_info[qidx].queue_id;
2964                 if (i < bp->tx_nr_rings_xdp)
2965                         continue;
2966                 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
2967                         j++;
2968         }
2969         return 0;
2970 }
2971
2972 static void bnxt_free_cp_rings(struct bnxt *bp)
2973 {
2974         int i;
2975
2976         if (!bp->bnapi)
2977                 return;
2978
2979         for (i = 0; i < bp->cp_nr_rings; i++) {
2980                 struct bnxt_napi *bnapi = bp->bnapi[i];
2981                 struct bnxt_cp_ring_info *cpr;
2982                 struct bnxt_ring_struct *ring;
2983                 int j;
2984
2985                 if (!bnapi)
2986                         continue;
2987
2988                 cpr = &bnapi->cp_ring;
2989                 ring = &cpr->cp_ring_struct;
2990
2991                 bnxt_free_ring(bp, &ring->ring_mem);
2992
2993                 for (j = 0; j < 2; j++) {
2994                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
2995
2996                         if (cpr2) {
2997                                 ring = &cpr2->cp_ring_struct;
2998                                 bnxt_free_ring(bp, &ring->ring_mem);
2999                                 kfree(cpr2);
3000                                 cpr->cp_ring_arr[j] = NULL;
3001                         }
3002                 }
3003         }
3004 }
3005
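/* Allocate one completion sub-ring; on P5 chips these hang off the
 * per-NAPI NQ ring as separate RX and TX completion rings.
 */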
3006 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3007 {
3008         struct bnxt_ring_mem_info *rmem;
3009         struct bnxt_ring_struct *ring;
3010         struct bnxt_cp_ring_info *cpr;
3011         int rc;
3012
3013         cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3014         if (!cpr)
3015                 return NULL;
3016
3017         ring = &cpr->cp_ring_struct;
3018         rmem = &ring->ring_mem;
3019         rmem->nr_pages = bp->cp_nr_pages;
3020         rmem->page_size = HW_CMPD_RING_SIZE;
3021         rmem->pg_arr = (void **)cpr->cp_desc_ring;
3022         rmem->dma_arr = cpr->cp_desc_mapping;
3023         rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3024         rc = bnxt_alloc_ring(bp, rmem);
3025         if (rc) {
3026                 bnxt_free_ring(bp, rmem);
3027                 kfree(cpr);
3028                 cpr = NULL;
3029         }
3030         return cpr;
3031 }
3032
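/* Allocate the per-NAPI completion (NQ on P5) rings and, on P5 chips,
 * the RX/TX completion sub-rings.  MSI-X map indexes skip over vectors
 * reserved for the ULP (RDMA) driver.
 */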
3033 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3034 {
3035         bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3036         int i, rc, ulp_base_vec, ulp_msix;
3037
3038         ulp_msix = bnxt_get_ulp_msix_num(bp);
3039         ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3040         for (i = 0; i < bp->cp_nr_rings; i++) {
3041                 struct bnxt_napi *bnapi = bp->bnapi[i];
3042                 struct bnxt_cp_ring_info *cpr;
3043                 struct bnxt_ring_struct *ring;
3044
3045                 if (!bnapi)
3046                         continue;
3047
3048                 cpr = &bnapi->cp_ring;
3049                 cpr->bnapi = bnapi;
3050                 ring = &cpr->cp_ring_struct;
3051
3052                 rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3053                 if (rc)
3054                         return rc;
3055
3056                 if (ulp_msix && i >= ulp_base_vec)
3057                         ring->map_idx = i + ulp_msix;
3058                 else
3059                         ring->map_idx = i;
3060
3061                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3062                         continue;
3063
3064                 if (i < bp->rx_nr_rings) {
3065                         struct bnxt_cp_ring_info *cpr2 =
3066                                 bnxt_alloc_cp_sub_ring(bp);
3067
3068                         cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3069                         if (!cpr2)
3070                                 return -ENOMEM;
3071                         cpr2->bnapi = bnapi;
3072                 }
3073                 if ((sh && i < bp->tx_nr_rings) ||
3074                     (!sh && i >= bp->rx_nr_rings)) {
3075                         struct bnxt_cp_ring_info *cpr2 =
3076                                 bnxt_alloc_cp_sub_ring(bp);
3077
3078                         cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3079                         if (!cpr2)
3080                                 return -ENOMEM;
3081                         cpr2->bnapi = bnapi;
3082                 }
3083         }
3084         return 0;
3085 }
3086
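/* Point each ring's ring_mem descriptor at its page array, DMA address
 * array and software ring storage so the generic ring alloc/free
 * helpers can manage them.
 */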
3087 static void bnxt_init_ring_struct(struct bnxt *bp)
3088 {
3089         int i;
3090
3091         for (i = 0; i < bp->cp_nr_rings; i++) {
3092                 struct bnxt_napi *bnapi = bp->bnapi[i];
3093                 struct bnxt_ring_mem_info *rmem;
3094                 struct bnxt_cp_ring_info *cpr;
3095                 struct bnxt_rx_ring_info *rxr;
3096                 struct bnxt_tx_ring_info *txr;
3097                 struct bnxt_ring_struct *ring;
3098
3099                 if (!bnapi)
3100                         continue;
3101
3102                 cpr = &bnapi->cp_ring;
3103                 ring = &cpr->cp_ring_struct;
3104                 rmem = &ring->ring_mem;
3105                 rmem->nr_pages = bp->cp_nr_pages;
3106                 rmem->page_size = HW_CMPD_RING_SIZE;
3107                 rmem->pg_arr = (void **)cpr->cp_desc_ring;
3108                 rmem->dma_arr = cpr->cp_desc_mapping;
3109                 rmem->vmem_size = 0;
3110
3111                 rxr = bnapi->rx_ring;
3112                 if (!rxr)
3113                         goto skip_rx;
3114
3115                 ring = &rxr->rx_ring_struct;
3116                 rmem = &ring->ring_mem;
3117                 rmem->nr_pages = bp->rx_nr_pages;
3118                 rmem->page_size = HW_RXBD_RING_SIZE;
3119                 rmem->pg_arr = (void **)rxr->rx_desc_ring;
3120                 rmem->dma_arr = rxr->rx_desc_mapping;
3121                 rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3122                 rmem->vmem = (void **)&rxr->rx_buf_ring;
3123
3124                 ring = &rxr->rx_agg_ring_struct;
3125                 rmem = &ring->ring_mem;
3126                 rmem->nr_pages = bp->rx_agg_nr_pages;
3127                 rmem->page_size = HW_RXBD_RING_SIZE;
3128                 rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3129                 rmem->dma_arr = rxr->rx_agg_desc_mapping;
3130                 rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3131                 rmem->vmem = (void **)&rxr->rx_agg_ring;
3132
3133 skip_rx:
3134                 txr = bnapi->tx_ring;
3135                 if (!txr)
3136                         continue;
3137
3138                 ring = &txr->tx_ring_struct;
3139                 rmem = &ring->ring_mem;
3140                 rmem->nr_pages = bp->tx_nr_pages;
3141                 rmem->page_size = HW_RXBD_RING_SIZE;
3142                 rmem->pg_arr = (void **)txr->tx_desc_ring;
3143                 rmem->dma_arr = txr->tx_desc_mapping;
3144                 rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3145                 rmem->vmem = (void **)&txr->tx_buf_ring;
3146         }
3147 }
3148
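/* Stamp every RX buffer descriptor in the ring with the given
 * type/flags word and a sequential opaque index used to locate the
 * software buffer on completion.
 */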
3149 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3150 {
3151         int i;
3152         u32 prod;
3153         struct rx_bd **rx_buf_ring;
3154
3155         rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3156         for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3157                 int j;
3158                 struct rx_bd *rxbd;
3159
3160                 rxbd = rx_buf_ring[i];
3161                 if (!rxbd)
3162                         continue;
3163
3164                 for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3165                         rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3166                         rxbd->rx_bd_opaque = prod;
3167                 }
3168         }
3169 }
3170
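/* Initialize one RX ring: stamp the buffer descriptors, fill the ring
 * (and the aggregation ring when enabled) with receive buffers, and
 * pre-allocate the TPA buffers when TPA is active.
 */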
3171 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3172 {
3173         struct net_device *dev = bp->dev;
3174         struct bnxt_rx_ring_info *rxr;
3175         struct bnxt_ring_struct *ring;
3176         u32 prod, type;
3177         int i;
3178
3179         type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3180                 RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3181
3182         if (NET_IP_ALIGN == 2)
3183                 type |= RX_BD_FLAGS_SOP;
3184
3185         rxr = &bp->rx_ring[ring_nr];
3186         ring = &rxr->rx_ring_struct;
3187         bnxt_init_rxbd_pages(ring, type);
3188
3189         if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3190                 bpf_prog_add(bp->xdp_prog, 1);
3191                 rxr->xdp_prog = bp->xdp_prog;
3192         }
3193         prod = rxr->rx_prod;
3194         for (i = 0; i < bp->rx_ring_size; i++) {
3195                 if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
3196                         netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3197                                     ring_nr, i, bp->rx_ring_size);
3198                         break;
3199                 }
3200                 prod = NEXT_RX(prod);
3201         }
3202         rxr->rx_prod = prod;
3203         ring->fw_ring_id = INVALID_HW_RING_ID;
3204
3205         ring = &rxr->rx_agg_ring_struct;
3206         ring->fw_ring_id = INVALID_HW_RING_ID;
3207
3208         if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3209                 return 0;
3210
3211         type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3212                 RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3213
3214         bnxt_init_rxbd_pages(ring, type);
3215
3216         prod = rxr->rx_agg_prod;
3217         for (i = 0; i < bp->rx_agg_ring_size; i++) {
3218                 if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
3219                         netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3220                                     ring_nr, i, bp->rx_agg_ring_size);
3221                         break;
3222                 }
3223                 prod = NEXT_RX_AGG(prod);
3224         }
3225         rxr->rx_agg_prod = prod;
3226
3227         if (bp->flags & BNXT_FLAG_TPA) {
3228                 if (rxr->rx_tpa) {
3229                         u8 *data;
3230                         dma_addr_t mapping;
3231
3232                         for (i = 0; i < bp->max_tpa; i++) {
3233                                 data = __bnxt_alloc_rx_data(bp, &mapping,
3234                                                             GFP_KERNEL);
3235                                 if (!data)
3236                                         return -ENOMEM;
3237
3238                                 rxr->rx_tpa[i].data = data;
3239                                 rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3240                                 rxr->rx_tpa[i].mapping = mapping;
3241                         }
3242                 } else {
3243                         netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
3244                         return -ENOMEM;
3245                 }
3246         }
3247
3248         return 0;
3249 }
3250
3251 static void bnxt_init_cp_rings(struct bnxt *bp)
3252 {
3253         int i, j;
3254
3255         for (i = 0; i < bp->cp_nr_rings; i++) {
3256                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3257                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3258
3259                 ring->fw_ring_id = INVALID_HW_RING_ID;
3260                 cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3261                 cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3262                 for (j = 0; j < 2; j++) {
3263                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3264
3265                         if (!cpr2)
3266                                 continue;
3267
3268                         ring = &cpr2->cp_ring_struct;
3269                         ring->fw_ring_id = INVALID_HW_RING_ID;
3270                         cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3271                         cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3272                 }
3273         }
3274 }
3275
3276 static int bnxt_init_rx_rings(struct bnxt *bp)
3277 {
3278         int i, rc = 0;
3279
3280         if (BNXT_RX_PAGE_MODE(bp)) {
3281                 bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3282                 bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3283         } else {
3284                 bp->rx_offset = BNXT_RX_OFFSET;
3285                 bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3286         }
3287
3288         for (i = 0; i < bp->rx_nr_rings; i++) {
3289                 rc = bnxt_init_one_rx_ring(bp, i);
3290                 if (rc)
3291                         break;
3292         }
3293
3294         return rc;
3295 }
3296
3297 static int bnxt_init_tx_rings(struct bnxt *bp)
3298 {
3299         u16 i;
3300
3301         bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3302                                    MAX_SKB_FRAGS + 1);
3303
3304         for (i = 0; i < bp->tx_nr_rings; i++) {
3305                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3306                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3307
3308                 ring->fw_ring_id = INVALID_HW_RING_ID;
3309         }
3310
3311         return 0;
3312 }
3313
3314 static void bnxt_free_ring_grps(struct bnxt *bp)
3315 {
3316         kfree(bp->grp_info);
3317         bp->grp_info = NULL;
3318 }
3319
3320 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3321 {
3322         int i;
3323
3324         if (irq_re_init) {
3325                 bp->grp_info = kcalloc(bp->cp_nr_rings,
3326                                        sizeof(struct bnxt_ring_grp_info),
3327                                        GFP_KERNEL);
3328                 if (!bp->grp_info)
3329                         return -ENOMEM;
3330         }
3331         for (i = 0; i < bp->cp_nr_rings; i++) {
3332                 if (irq_re_init)
3333                         bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3334                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3335                 bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3336                 bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3337                 bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3338         }
3339         return 0;
3340 }
3341
3342 static void bnxt_free_vnics(struct bnxt *bp)
3343 {
3344         kfree(bp->vnic_info);
3345         bp->vnic_info = NULL;
3346         bp->nr_vnics = 0;
3347 }
3348
3349 static int bnxt_alloc_vnics(struct bnxt *bp)
3350 {
3351         int num_vnics = 1;
3352
3353 #ifdef CONFIG_RFS_ACCEL
3354         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3355                 num_vnics += bp->rx_nr_rings;
3356 #endif
3357
3358         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3359                 num_vnics++;
3360
3361         bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3362                                 GFP_KERNEL);
3363         if (!bp->vnic_info)
3364                 return -ENOMEM;
3365
3366         bp->nr_vnics = num_vnics;
3367         return 0;
3368 }
3369
3370 static void bnxt_init_vnics(struct bnxt *bp)
3371 {
3372         int i;
3373
3374         for (i = 0; i < bp->nr_vnics; i++) {
3375                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3376                 int j;
3377
3378                 vnic->fw_vnic_id = INVALID_HW_RING_ID;
3379                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3380                         vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3381
3382                 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3383
3384                 if (bp->vnic_info[i].rss_hash_key) {
3385                         if (i == 0)
3386                                 prandom_bytes(vnic->rss_hash_key,
3387                                               HW_HASH_KEY_SIZE);
3388                         else
3389                                 memcpy(vnic->rss_hash_key,
3390                                        bp->vnic_info[0].rss_hash_key,
3391                                        HW_HASH_KEY_SIZE);
3392                 }
3393         }
3394 }
3395
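/* Return a power-of-two number of ring pages large enough to hold
 * ring_size descriptors.  For example (illustrative numbers), with
 * 256 descriptors per page a ring_size of 1000 yields 4 pages.
 */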
3396 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3397 {
3398         int pages;
3399
3400         pages = ring_size / desc_per_pg;
3401
3402         if (!pages)
3403                 return 1;
3404
3405         pages++;
3406
3407         while (pages & (pages - 1))
3408                 pages++;
3409
3410         return pages;
3411 }
3412
3413 void bnxt_set_tpa_flags(struct bnxt *bp)
3414 {
3415         bp->flags &= ~BNXT_FLAG_TPA;
3416         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3417                 return;
3418         if (bp->dev->features & NETIF_F_LRO)
3419                 bp->flags |= BNXT_FLAG_LRO;
3420         else if (bp->dev->features & NETIF_F_GRO_HW)
3421                 bp->flags |= BNXT_FLAG_GRO;
3422 }
3423
3424 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3425  * be set on entry.
3426  */
3427 void bnxt_set_ring_params(struct bnxt *bp)
3428 {
3429         u32 ring_size, rx_size, rx_space;
3430         u32 agg_factor = 0, agg_ring_size = 0;
3431
3432         /* 8 for CRC and VLAN */
3433         rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3434
3435         rx_space = rx_size + NET_SKB_PAD +
3436                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3437
3438         bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3439         ring_size = bp->rx_ring_size;
3440         bp->rx_agg_ring_size = 0;
3441         bp->rx_agg_nr_pages = 0;
3442
3443         if (bp->flags & BNXT_FLAG_TPA)
3444                 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3445
3446         bp->flags &= ~BNXT_FLAG_JUMBO;
3447         if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3448                 u32 jumbo_factor;
3449
3450                 bp->flags |= BNXT_FLAG_JUMBO;
3451                 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3452                 if (jumbo_factor > agg_factor)
3453                         agg_factor = jumbo_factor;
3454         }
3455         agg_ring_size = ring_size * agg_factor;
3456
3457         if (agg_ring_size) {
3458                 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3459                                                         RX_DESC_CNT);
3460                 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3461                         u32 tmp = agg_ring_size;
3462
3463                         bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3464                         agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3465                         netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3466                                     tmp, agg_ring_size);
3467                 }
3468                 bp->rx_agg_ring_size = agg_ring_size;
3469                 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3470                 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3471                 rx_space = rx_size + NET_SKB_PAD +
3472                         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3473         }
3474
3475         bp->rx_buf_use_size = rx_size;
3476         bp->rx_buf_size = rx_space;
3477
3478         bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3479         bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3480
3481         ring_size = bp->tx_ring_size;
3482         bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3483         bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3484
3485         ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
3486         bp->cp_ring_size = ring_size;
3487
3488         bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3489         if (bp->cp_nr_pages > MAX_CP_PAGES) {
3490                 bp->cp_nr_pages = MAX_CP_PAGES;
3491                 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3492                 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3493                             ring_size, bp->cp_ring_size);
3494         }
3495         bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3496         bp->cp_ring_mask = bp->cp_bit - 1;
3497 }
3498
3499 /* Changing allocation mode of RX rings.
3500  * TODO: Update when extending xdp_rxq_info to support allocation modes.
3501  */
3502 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
3503 {
3504         if (page_mode) {
3505                 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3506                         return -EOPNOTSUPP;
3507                 bp->dev->max_mtu =
3508                         min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
3509                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3510                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
3511                 bp->rx_dir = DMA_BIDIRECTIONAL;
3512                 bp->rx_skb_func = bnxt_rx_page_skb;
3513                 /* Disable LRO or GRO_HW */
3514                 netdev_update_features(bp->dev);
3515         } else {
3516                 bp->dev->max_mtu = bp->max_mtu;
3517                 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3518                 bp->rx_dir = DMA_FROM_DEVICE;
3519                 bp->rx_skb_func = bnxt_rx_skb;
3520         }
3521         return 0;
3522 }
3523
3524 static void bnxt_free_vnic_attributes(struct bnxt *bp)
3525 {
3526         int i;
3527         struct bnxt_vnic_info *vnic;
3528         struct pci_dev *pdev = bp->pdev;
3529
3530         if (!bp->vnic_info)
3531                 return;
3532
3533         for (i = 0; i < bp->nr_vnics; i++) {
3534                 vnic = &bp->vnic_info[i];
3535
3536                 kfree(vnic->fw_grp_ids);
3537                 vnic->fw_grp_ids = NULL;
3538
3539                 kfree(vnic->uc_list);
3540                 vnic->uc_list = NULL;
3541
3542                 if (vnic->mc_list) {
3543                         dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3544                                           vnic->mc_list, vnic->mc_list_mapping);
3545                         vnic->mc_list = NULL;
3546                 }
3547
3548                 if (vnic->rss_table) {
3549                         dma_free_coherent(&pdev->dev, PAGE_SIZE,
3550                                           vnic->rss_table,
3551                                           vnic->rss_table_dma_addr);
3552                         vnic->rss_table = NULL;
3553                 }
3554
3555                 vnic->rss_hash_key = NULL;
3556                 vnic->flags = 0;
3557         }
3558 }
3559
3560 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3561 {
3562         int i, rc = 0, size;
3563         struct bnxt_vnic_info *vnic;
3564         struct pci_dev *pdev = bp->pdev;
3565         int max_rings;
3566
3567         for (i = 0; i < bp->nr_vnics; i++) {
3568                 vnic = &bp->vnic_info[i];
3569
3570                 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3571                         int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3572
3573                         if (mem_size > 0) {
3574                                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3575                                 if (!vnic->uc_list) {
3576                                         rc = -ENOMEM;
3577                                         goto out;
3578                                 }
3579                         }
3580                 }
3581
3582                 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3583                         vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3584                         vnic->mc_list =
3585                                 dma_alloc_coherent(&pdev->dev,
3586                                                    vnic->mc_list_size,
3587                                                    &vnic->mc_list_mapping,
3588                                                    GFP_KERNEL);
3589                         if (!vnic->mc_list) {
3590                                 rc = -ENOMEM;
3591                                 goto out;
3592                         }
3593                 }
3594
3595                 if (bp->flags & BNXT_FLAG_CHIP_P5)
3596                         goto vnic_skip_grps;
3597
3598                 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3599                         max_rings = bp->rx_nr_rings;
3600                 else
3601                         max_rings = 1;
3602
3603                 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3604                 if (!vnic->fw_grp_ids) {
3605                         rc = -ENOMEM;
3606                         goto out;
3607                 }
3608 vnic_skip_grps:
3609                 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3610                     !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3611                         continue;
3612
3613                 /* Allocate rss table and hash key */
3614                 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3615                                                      &vnic->rss_table_dma_addr,
3616                                                      GFP_KERNEL);
3617                 if (!vnic->rss_table) {
3618                         rc = -ENOMEM;
3619                         goto out;
3620                 }
3621
3622                 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3623
3624                 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3625                 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3626         }
3627         return 0;
3628
3629 out:
3630         return rc;
3631 }
3632
3633 static void bnxt_free_hwrm_resources(struct bnxt *bp)
3634 {
3635         struct pci_dev *pdev = bp->pdev;
3636
3637         if (bp->hwrm_cmd_resp_addr) {
3638                 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3639                                   bp->hwrm_cmd_resp_dma_addr);
3640                 bp->hwrm_cmd_resp_addr = NULL;
3641         }
3642
3643         if (bp->hwrm_cmd_kong_resp_addr) {
3644                 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3645                                   bp->hwrm_cmd_kong_resp_addr,
3646                                   bp->hwrm_cmd_kong_resp_dma_addr);
3647                 bp->hwrm_cmd_kong_resp_addr = NULL;
3648         }
3649 }
3650
3651 static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp)
3652 {
3653         struct pci_dev *pdev = bp->pdev;
3654
3655         if (bp->hwrm_cmd_kong_resp_addr)
3656                 return 0;
3657
3658         bp->hwrm_cmd_kong_resp_addr =
3659                 dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3660                                    &bp->hwrm_cmd_kong_resp_dma_addr,
3661                                    GFP_KERNEL);
3662         if (!bp->hwrm_cmd_kong_resp_addr)
3663                 return -ENOMEM;
3664
3665         return 0;
3666 }
3667
3668 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3669 {
3670         struct pci_dev *pdev = bp->pdev;
3671
3672         bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3673                                                    &bp->hwrm_cmd_resp_dma_addr,
3674                                                    GFP_KERNEL);
3675         if (!bp->hwrm_cmd_resp_addr)
3676                 return -ENOMEM;
3677
3678         return 0;
3679 }
3680
3681 static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3682 {
3683         if (bp->hwrm_short_cmd_req_addr) {
3684                 struct pci_dev *pdev = bp->pdev;
3685
3686                 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3687                                   bp->hwrm_short_cmd_req_addr,
3688                                   bp->hwrm_short_cmd_req_dma_addr);
3689                 bp->hwrm_short_cmd_req_addr = NULL;
3690         }
3691 }
3692
3693 static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3694 {
3695         struct pci_dev *pdev = bp->pdev;
3696
3697         if (bp->hwrm_short_cmd_req_addr)
3698                 return 0;
3699
3700         bp->hwrm_short_cmd_req_addr =
3701                 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
3702                                    &bp->hwrm_short_cmd_req_dma_addr,
3703                                    GFP_KERNEL);
3704         if (!bp->hwrm_short_cmd_req_addr)
3705                 return -ENOMEM;
3706
3707         return 0;
3708 }
3709
3710 static void bnxt_free_port_stats(struct bnxt *bp)
3711 {
3712         struct pci_dev *pdev = bp->pdev;
3713
3714         bp->flags &= ~BNXT_FLAG_PORT_STATS;
3715         bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3716
3717         if (bp->hw_rx_port_stats) {
3718                 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3719                                   bp->hw_rx_port_stats,
3720                                   bp->hw_rx_port_stats_map);
3721                 bp->hw_rx_port_stats = NULL;
3722         }
3723
3724         if (bp->hw_tx_port_stats_ext) {
3725                 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
3726                                   bp->hw_tx_port_stats_ext,
3727                                   bp->hw_tx_port_stats_ext_map);
3728                 bp->hw_tx_port_stats_ext = NULL;
3729         }
3730
3731         if (bp->hw_rx_port_stats_ext) {
3732                 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3733                                   bp->hw_rx_port_stats_ext,
3734                                   bp->hw_rx_port_stats_ext_map);
3735                 bp->hw_rx_port_stats_ext = NULL;
3736         }
3737
3738         if (bp->hw_pcie_stats) {
3739                 dma_free_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3740                                   bp->hw_pcie_stats, bp->hw_pcie_stats_map);
3741                 bp->hw_pcie_stats = NULL;
3742         }
3743 }
3744
3745 static void bnxt_free_ring_stats(struct bnxt *bp)
3746 {
3747         struct pci_dev *pdev = bp->pdev;
3748         int size, i;
3749
3750         if (!bp->bnapi)
3751                 return;
3752
3753         size = bp->hw_ring_stats_size;
3754
3755         for (i = 0; i < bp->cp_nr_rings; i++) {
3756                 struct bnxt_napi *bnapi = bp->bnapi[i];
3757                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3758
3759                 if (cpr->hw_stats) {
3760                         dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3761                                           cpr->hw_stats_map);
3762                         cpr->hw_stats = NULL;
3763                 }
3764         }
3765 }
3766
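/* Allocate the per-completion-ring hardware statistics blocks and, for
 * PFs, the port, extended and PCIe statistics buffers when the
 * firmware supports them.
 */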
3767 static int bnxt_alloc_stats(struct bnxt *bp)
3768 {
3769         u32 size, i;
3770         struct pci_dev *pdev = bp->pdev;
3771
3772         size = bp->hw_ring_stats_size;
3773
3774         for (i = 0; i < bp->cp_nr_rings; i++) {
3775                 struct bnxt_napi *bnapi = bp->bnapi[i];
3776                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3777
3778                 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3779                                                    &cpr->hw_stats_map,
3780                                                    GFP_KERNEL);
3781                 if (!cpr->hw_stats)
3782                         return -ENOMEM;
3783
3784                 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3785         }
3786
3787         if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
3788                 return 0;
3789
3790         if (bp->hw_rx_port_stats)
3791                 goto alloc_ext_stats;
3792
3793         bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3794                                  sizeof(struct tx_port_stats) + 1024;
3795
3796         bp->hw_rx_port_stats =
3797                 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3798                                    &bp->hw_rx_port_stats_map,
3799                                    GFP_KERNEL);
3800         if (!bp->hw_rx_port_stats)
3801                 return -ENOMEM;
3802
3803         bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + 512;
3804         bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3805                                    sizeof(struct rx_port_stats) + 512;
3806         bp->flags |= BNXT_FLAG_PORT_STATS;
3807
3808 alloc_ext_stats:
3809         /* Display extended statistics only if FW supports it */
3810         if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
3811                 if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
3812                         return 0;
3813
3814         if (bp->hw_rx_port_stats_ext)
3815                 goto alloc_tx_ext_stats;
3816
3817         bp->hw_rx_port_stats_ext =
3818                 dma_alloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3819                                    &bp->hw_rx_port_stats_ext_map, GFP_KERNEL);
3820         if (!bp->hw_rx_port_stats_ext)
3821                 return 0;
3822
3823 alloc_tx_ext_stats:
3824         if (bp->hw_tx_port_stats_ext)
3825                 goto alloc_pcie_stats;
3826
3827         if (bp->hwrm_spec_code >= 0x10902 ||
3828             (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
3829                 bp->hw_tx_port_stats_ext =
3830                         dma_alloc_coherent(&pdev->dev,
3831                                            sizeof(struct tx_port_stats_ext),
3832                                            &bp->hw_tx_port_stats_ext_map,
3833                                            GFP_KERNEL);
3834         }
3835         bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
3836
3837 alloc_pcie_stats:
3838         if (bp->hw_pcie_stats ||
3839             !(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED))
3840                 return 0;
3841
3842         bp->hw_pcie_stats =
3843                 dma_alloc_coherent(&pdev->dev, sizeof(struct pcie_ctx_hw_stats),
3844                                    &bp->hw_pcie_stats_map, GFP_KERNEL);
3845         if (!bp->hw_pcie_stats)
3846                 return 0;
3847
3848         bp->flags |= BNXT_FLAG_PCIE_STATS;
3849         return 0;
3850 }
3851
3852 static void bnxt_clear_ring_indices(struct bnxt *bp)
3853 {
3854         int i;
3855
3856         if (!bp->bnapi)
3857                 return;
3858
3859         for (i = 0; i < bp->cp_nr_rings; i++) {
3860                 struct bnxt_napi *bnapi = bp->bnapi[i];
3861                 struct bnxt_cp_ring_info *cpr;
3862                 struct bnxt_rx_ring_info *rxr;
3863                 struct bnxt_tx_ring_info *txr;
3864
3865                 if (!bnapi)
3866                         continue;
3867
3868                 cpr = &bnapi->cp_ring;
3869                 cpr->cp_raw_cons = 0;
3870
3871                 txr = bnapi->tx_ring;
3872                 if (txr) {
3873                         txr->tx_prod = 0;
3874                         txr->tx_cons = 0;
3875                 }
3876
3877                 rxr = bnapi->rx_ring;
3878                 if (rxr) {
3879                         rxr->rx_prod = 0;
3880                         rxr->rx_agg_prod = 0;
3881                         rxr->rx_sw_agg_prod = 0;
3882                         rxr->rx_next_cons = 0;
3883                 }
3884         }
3885 }
3886
3887 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3888 {
3889 #ifdef CONFIG_RFS_ACCEL
3890         int i;
3891
3892         /* We are under rtnl_lock and all our NAPIs have been disabled,
3893          * so it is safe to delete the hash table.
3894          */
3895         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3896                 struct hlist_head *head;
3897                 struct hlist_node *tmp;
3898                 struct bnxt_ntuple_filter *fltr;
3899
3900                 head = &bp->ntp_fltr_hash_tbl[i];
3901                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3902                         hlist_del(&fltr->hash);
3903                         kfree(fltr);
3904                 }
3905         }
3906         if (irq_reinit) {
3907                 kfree(bp->ntp_fltr_bmap);
3908                 bp->ntp_fltr_bmap = NULL;
3909         }
3910         bp->ntp_fltr_count = 0;
3911 #endif
3912 }
3913
3914 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3915 {
3916 #ifdef CONFIG_RFS_ACCEL
3917         int i, rc = 0;
3918
3919         if (!(bp->flags & BNXT_FLAG_RFS))
3920                 return 0;
3921
3922         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3923                 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3924
3925         bp->ntp_fltr_count = 0;
3926         bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3927                                     sizeof(long),
3928                                     GFP_KERNEL);
3929
3930         if (!bp->ntp_fltr_bmap)
3931                 rc = -ENOMEM;
3932
3933         return rc;
3934 #else
3935         return 0;
3936 #endif
3937 }
3938
3939 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3940 {
3941         bnxt_free_vnic_attributes(bp);
3942         bnxt_free_tx_rings(bp);
3943         bnxt_free_rx_rings(bp);
3944         bnxt_free_cp_rings(bp);
3945         bnxt_free_ntp_fltrs(bp, irq_re_init);
3946         if (irq_re_init) {
3947                 bnxt_free_ring_stats(bp);
3948                 bnxt_free_ring_grps(bp);
3949                 bnxt_free_vnics(bp);
3950                 kfree(bp->tx_ring_map);
3951                 bp->tx_ring_map = NULL;
3952                 kfree(bp->tx_ring);
3953                 bp->tx_ring = NULL;
3954                 kfree(bp->rx_ring);
3955                 bp->rx_ring = NULL;
3956                 kfree(bp->bnapi);
3957                 bp->bnapi = NULL;
3958         } else {
3959                 bnxt_clear_ring_indices(bp);
3960         }
3961 }
3962
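/* Allocate all driver ring state: on IRQ re-init, the bnxt_napi array,
 * RX/TX ring info arrays, statistics, ntuple filters and VNICs; then
 * the RX, TX and completion rings and the VNIC attributes.
 */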
3963 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3964 {
3965         int i, j, rc, size, arr_size;
3966         void *bnapi;
3967
3968         if (irq_re_init) {
3969                 /* Allocate bnapi mem pointer array and mem block for
3970                  * all queues
3971                  */
3972                 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3973                                 bp->cp_nr_rings);
3974                 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3975                 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3976                 if (!bnapi)
3977                         return -ENOMEM;
3978
3979                 bp->bnapi = bnapi;
3980                 bnapi += arr_size;
3981                 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3982                         bp->bnapi[i] = bnapi;
3983                         bp->bnapi[i]->index = i;
3984                         bp->bnapi[i]->bp = bp;
3985                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
3986                                 struct bnxt_cp_ring_info *cpr =
3987                                         &bp->bnapi[i]->cp_ring;
3988
3989                                 cpr->cp_ring_struct.ring_mem.flags =
3990                                         BNXT_RMEM_RING_PTE_FLAG;
3991                         }
3992                 }
3993
3994                 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3995                                       sizeof(struct bnxt_rx_ring_info),
3996                                       GFP_KERNEL);
3997                 if (!bp->rx_ring)
3998                         return -ENOMEM;
3999
4000                 for (i = 0; i < bp->rx_nr_rings; i++) {
4001                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4002
4003                         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4004                                 rxr->rx_ring_struct.ring_mem.flags =
4005                                         BNXT_RMEM_RING_PTE_FLAG;
4006                                 rxr->rx_agg_ring_struct.ring_mem.flags =
4007                                         BNXT_RMEM_RING_PTE_FLAG;
4008                         }
4009                         rxr->bnapi = bp->bnapi[i];
4010                         bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4011                 }
4012
4013                 bp->tx_ring = kcalloc(bp->tx_nr_rings,
4014                                       sizeof(struct bnxt_tx_ring_info),
4015                                       GFP_KERNEL);
4016                 if (!bp->tx_ring)
4017                         return -ENOMEM;
4018
4019                 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4020                                           GFP_KERNEL);
4021
4022                 if (!bp->tx_ring_map)
4023                         return -ENOMEM;
4024
4025                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4026                         j = 0;
4027                 else
4028                         j = bp->rx_nr_rings;
4029
4030                 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4031                         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4032
4033                         if (bp->flags & BNXT_FLAG_CHIP_P5)
4034                                 txr->tx_ring_struct.ring_mem.flags =
4035                                         BNXT_RMEM_RING_PTE_FLAG;
4036                         txr->bnapi = bp->bnapi[j];
4037                         bp->bnapi[j]->tx_ring = txr;
4038                         bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4039                         if (i >= bp->tx_nr_rings_xdp) {
4040                                 txr->txq_index = i - bp->tx_nr_rings_xdp;
4041                                 bp->bnapi[j]->tx_int = bnxt_tx_int;
4042                         } else {
4043                                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4044                                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4045                         }
4046                 }
4047
4048                 rc = bnxt_alloc_stats(bp);
4049                 if (rc)
4050                         goto alloc_mem_err;
4051
4052                 rc = bnxt_alloc_ntp_fltrs(bp);
4053                 if (rc)
4054                         goto alloc_mem_err;
4055
4056                 rc = bnxt_alloc_vnics(bp);
4057                 if (rc)
4058                         goto alloc_mem_err;
4059         }
4060
4061         bnxt_init_ring_struct(bp);
4062
4063         rc = bnxt_alloc_rx_rings(bp);
4064         if (rc)
4065                 goto alloc_mem_err;
4066
4067         rc = bnxt_alloc_tx_rings(bp);
4068         if (rc)
4069                 goto alloc_mem_err;
4070
4071         rc = bnxt_alloc_cp_rings(bp);
4072         if (rc)
4073                 goto alloc_mem_err;
4074
4075         bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4076                                   BNXT_VNIC_UCAST_FLAG;
4077         rc = bnxt_alloc_vnic_attributes(bp);
4078         if (rc)
4079                 goto alloc_mem_err;
4080         return 0;
4081
4082 alloc_mem_err:
4083         bnxt_free_mem(bp, true);
4084         return rc;
4085 }
4086
4087 static void bnxt_disable_int(struct bnxt *bp)
4088 {
4089         int i;
4090
4091         if (!bp->bnapi)
4092                 return;
4093
4094         for (i = 0; i < bp->cp_nr_rings; i++) {
4095                 struct bnxt_napi *bnapi = bp->bnapi[i];
4096                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4097                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4098
4099                 if (ring->fw_ring_id != INVALID_HW_RING_ID)
4100                         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4101         }
4102 }
4103
4104 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4105 {
4106         struct bnxt_napi *bnapi = bp->bnapi[n];
4107         struct bnxt_cp_ring_info *cpr;
4108
4109         cpr = &bnapi->cp_ring;
4110         return cpr->cp_ring_struct.map_idx;
4111 }
4112
4113 static void bnxt_disable_int_sync(struct bnxt *bp)
4114 {
4115         int i;
4116
4117         atomic_inc(&bp->intr_sem);
4118
4119         bnxt_disable_int(bp);
4120         for (i = 0; i < bp->cp_nr_rings; i++) {
4121                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4122
4123                 synchronize_irq(bp->irq_tbl[map_idx].vector);
4124         }
4125 }
4126
4127 static void bnxt_enable_int(struct bnxt *bp)
4128 {
4129         int i;
4130
4131         atomic_set(&bp->intr_sem, 0);
4132         for (i = 0; i < bp->cp_nr_rings; i++) {
4133                 struct bnxt_napi *bnapi = bp->bnapi[i];
4134                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4135
4136                 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4137         }
4138 }
4139
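/* Initialize the common HWRM request header: request type, completion
 * ring, target ID and the DMA address to which firmware should write
 * the response (KONG channel requests use a separate response buffer).
 */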
4140 void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
4141                             u16 cmpl_ring, u16 target_id)
4142 {
4143         struct input *req = request;
4144
4145         req->req_type = cpu_to_le16(req_type);
4146         req->cmpl_ring = cpu_to_le16(cmpl_ring);
4147         req->target_id = cpu_to_le16(target_id);
4148         if (bnxt_kong_hwrm_message(bp, req))
4149                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
4150         else
4151                 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
4152 }
4153
4154 static int bnxt_hwrm_to_stderr(u32 hwrm_err)
4155 {
4156         switch (hwrm_err) {
4157         case HWRM_ERR_CODE_SUCCESS:
4158                 return 0;
4159         case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
4160                 return -EACCES;
4161         case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
4162                 return -ENOSPC;
4163         case HWRM_ERR_CODE_INVALID_PARAMS:
4164         case HWRM_ERR_CODE_INVALID_FLAGS:
4165         case HWRM_ERR_CODE_INVALID_ENABLES:
4166         case HWRM_ERR_CODE_UNSUPPORTED_TLV:
4167         case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR:
4168                 return -EINVAL;
4169         case HWRM_ERR_CODE_NO_BUFFER:
4170                 return -ENOMEM;
4171         case HWRM_ERR_CODE_HOT_RESET_PROGRESS:
4172                 return -EAGAIN;
4173         case HWRM_ERR_CODE_CMD_NOT_SUPPORTED:
4174                 return -EOPNOTSUPP;
4175         default:
4176                 return -EIO;
4177         }
4178 }
4179
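/* Core HWRM transport: copy the request into the ChiMP (or Kong) BAR0
 * communication channel, using the short command format when required or
 * supported, ring the channel doorbell, then wait for completion.  When a
 * completion ring is specified, wait for the HWRM completion interrupt to
 * flip hwrm_intr_seq_id to its complement; otherwise poll the DMA'ed
 * response length and the trailing valid byte.
 */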
4180 static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
4181                                  int timeout, bool silent)
4182 {
4183         int i, intr_process, rc, tmo_count;
4184         struct input *req = msg;
4185         u32 *data = msg;
4186         __le32 *resp_len;
4187         u8 *valid;
4188         u16 cp_ring_id, len = 0;
4189         struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
4190         u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
4191         struct hwrm_short_input short_input = {0};
4192         u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
4193         u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr;
4194         u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
4195         u16 dst = BNXT_HWRM_CHNL_CHIMP;
4196
4197         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4198                 return -EBUSY;
4199
4200         if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4201                 if (msg_len > bp->hwrm_max_ext_req_len ||
4202                     !bp->hwrm_short_cmd_req_addr)
4203                         return -EINVAL;
4204         }
4205
4206         if (bnxt_hwrm_kong_chnl(bp, req)) {
4207                 dst = BNXT_HWRM_CHNL_KONG;
4208                 bar_offset = BNXT_GRCPF_REG_KONG_COMM;
4209                 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
4210                 resp = bp->hwrm_cmd_kong_resp_addr;
4211                 resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr;
4212         }
4213
4214         memset(resp, 0, PAGE_SIZE);
4215         cp_ring_id = le16_to_cpu(req->cmpl_ring);
4216         intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
4217
4218         req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
4219         /* currently supports only one outstanding message */
4220         if (intr_process)
4221                 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
4222
4223         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
4224             msg_len > BNXT_HWRM_MAX_REQ_LEN) {
4225                 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
4226                 u16 max_msg_len;
4227
4228                 /* Set boundary for maximum extended request length for short
4229                  * cmd format. If passed up from the device, use the max
4230                  * supported internal req length.
4231                  */
4232                 max_msg_len = bp->hwrm_max_ext_req_len;
4233
4234                 memcpy(short_cmd_req, req, msg_len);
4235                 if (msg_len < max_msg_len)
4236                         memset(short_cmd_req + msg_len, 0,
4237                                max_msg_len - msg_len);
4238
4239                 short_input.req_type = req->req_type;
4240                 short_input.signature =
4241                                 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
4242                 short_input.size = cpu_to_le16(msg_len);
4243                 short_input.req_addr =
4244                         cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
4245
4246                 data = (u32 *)&short_input;
4247                 msg_len = sizeof(short_input);
4248
4249                 /* Sync memory write before updating doorbell */
4250                 wmb();
4251
4252                 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
4253         }
4254
4255         /* Write request msg to hwrm channel */
4256         __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4);
4257
4258         for (i = msg_len; i < max_req_len; i += 4)
4259                 writel(0, bp->bar0 + bar_offset + i);
4260
4261         /* Ring channel doorbell */
4262         writel(1, bp->bar0 + doorbell_offset);
4263
4264         if (!pci_is_enabled(bp->pdev))
4265                 return 0;
4266
4267         if (!timeout)
4268                 timeout = DFLT_HWRM_CMD_TIMEOUT;
4269         /* convert timeout to usec */
4270         timeout *= 1000;
4271
4272         i = 0;
4273         /* Short timeout for the first few iterations:
4274          * number of loops = number of loops for short timeout +
4275          * number of loops for standard timeout.
4276          */
4277         tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
4278         timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
4279         tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
4280         resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET);
4281
4282         if (intr_process) {
4283                 u16 seq_id = bp->hwrm_intr_seq_id;
4284
4285                 /* Wait until hwrm response cmpl interrupt is processed */
4286                 while (bp->hwrm_intr_seq_id != (u16)~seq_id &&
4287                        i++ < tmo_count) {
4288                         /* Abort the wait for completion if the FW health
4289                          * check has failed.
4290                          */
4291                         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4292                                 return -EBUSY;
4293                         /* on first few passes, just barely sleep */
4294                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4295                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4296                                              HWRM_SHORT_MAX_TIMEOUT);
4297                         else
4298                                 usleep_range(HWRM_MIN_TIMEOUT,
4299                                              HWRM_MAX_TIMEOUT);
4300                 }
4301
4302                 if (bp->hwrm_intr_seq_id != (u16)~seq_id) {
4303                         if (!silent)
4304                                 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
4305                                            le16_to_cpu(req->req_type));
4306                         return -EBUSY;
4307                 }
4308                 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4309                       HWRM_RESP_LEN_SFT;
4310                 valid = resp_addr + len - 1;
4311         } else {
4312                 int j;
4313
4314                 /* Check if response len is updated */
4315                 for (i = 0; i < tmo_count; i++) {
4316                         /* Abort the wait for completion if the FW health
4317                          * check has failed.
4318                          */
4319                         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
4320                                 return -EBUSY;
4321                         len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
4322                               HWRM_RESP_LEN_SFT;
4323                         if (len)
4324                                 break;
4325                         /* on first few passes, just barely sleep */
4326                         if (i < HWRM_SHORT_TIMEOUT_COUNTER)
4327                                 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
4328                                              HWRM_SHORT_MAX_TIMEOUT);
4329                         else
4330                                 usleep_range(HWRM_MIN_TIMEOUT,
4331                                              HWRM_MAX_TIMEOUT);
4332                 }
4333
4334                 if (i >= tmo_count) {
4335                         if (!silent)
4336                                 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
4337                                            HWRM_TOTAL_TIMEOUT(i),
4338                                            le16_to_cpu(req->req_type),
4339                                            le16_to_cpu(req->seq_id), len);
4340                         return -EBUSY;
4341                 }
4342
4343                 /* Last byte of resp contains valid bit */
4344                 valid = resp_addr + len - 1;
4345                 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
4346                         /* make sure we read from updated DMA memory */
4347                         dma_rmb();
4348                         if (*valid)
4349                                 break;
4350                         usleep_range(1, 5);
4351                 }
4352
4353                 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
4354                         if (!silent)
4355                                 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
4356                                            HWRM_TOTAL_TIMEOUT(i),
4357                                            le16_to_cpu(req->req_type),
4358                                            le16_to_cpu(req->seq_id), len,
4359                                            *valid);
4360                         return -EBUSY;
4361                 }
4362         }
4363
4364         /* Zero the valid bit for compatibility.  The valid bit in an older
4365          * spec may become a new field in a newer spec.  We must make sure
4366          * that a new field not implemented by the old spec reads zero.
4367          */
4368         *valid = 0;
4369         rc = le16_to_cpu(resp->error_code);
4370         if (rc && !silent)
4371                 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
4372                            le16_to_cpu(resp->req_type),
4373                            le16_to_cpu(resp->seq_id), rc);
4374         return bnxt_hwrm_to_stderr(rc);
4375 }
4376
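/* Wrappers around bnxt_hwrm_do_send_msg(): the _hwrm_send_message*()
 * variants expect the caller to hold bp->hwrm_cmd_lock (needed when the
 * response buffer must be read before another command is issued), while
 * hwrm_send_message*() take and release the lock internally.  The *_silent
 * variants suppress error logging.
 */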
4377 int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4378 {
4379         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
4380 }
4381
4382 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4383                               int timeout)
4384 {
4385         return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4386 }
4387
4388 int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
4389 {
4390         int rc;
4391
4392         mutex_lock(&bp->hwrm_cmd_lock);
4393         rc = _hwrm_send_message(bp, msg, msg_len, timeout);
4394         mutex_unlock(&bp->hwrm_cmd_lock);
4395         return rc;
4396 }
4397
4398 int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
4399                              int timeout)
4400 {
4401         int rc;
4402
4403         mutex_lock(&bp->hwrm_cmd_lock);
4404         rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
4405         mutex_unlock(&bp->hwrm_cmd_lock);
4406         return rc;
4407 }
4408
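/* Register the driver with the firmware: advertise the OS type and driver
 * version, hot-reset and error-recovery support when the firmware is
 * capable, the set of VF commands the PF wants forwarded to it, and the
 * async events the driver wants to receive.
 */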
4409 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4410                             bool async_only)
4411 {
4412         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
4413         struct hwrm_func_drv_rgtr_input req = {0};
4414         DECLARE_BITMAP(async_events_bmap, 256);
4415         u32 *events = (u32 *)async_events_bmap;
4416         u32 flags;
4417         int rc, i;
4418
4419         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
4420
4421         req.enables =
4422                 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4423                             FUNC_DRV_RGTR_REQ_ENABLES_VER |
4424                             FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4425
4426         req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4427         flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4428         if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4429                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4430         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4431                 flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4432                          FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4433         req.flags = cpu_to_le32(flags);
4434         req.ver_maj_8b = DRV_VER_MAJ;
4435         req.ver_min_8b = DRV_VER_MIN;
4436         req.ver_upd_8b = DRV_VER_UPD;
4437         req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
4438         req.ver_min = cpu_to_le16(DRV_VER_MIN);
4439         req.ver_upd = cpu_to_le16(DRV_VER_UPD);
4440
4441         if (BNXT_PF(bp)) {
4442                 u32 data[8];
4443                 int i;
4444
4445                 memset(data, 0, sizeof(data));
4446                 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4447                         u16 cmd = bnxt_vf_req_snif[i];
4448                         unsigned int bit, idx;
4449
4450                         idx = cmd / 32;
4451                         bit = cmd % 32;
4452                         data[idx] |= 1 << bit;
4453                 }
4454
4455                 for (i = 0; i < 8; i++)
4456                         req.vf_req_fwd[i] = cpu_to_le32(data[i]);
4457
4458                 req.enables |=
4459                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4460         }
4461
4462         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4463                 req.flags |= cpu_to_le32(
4464                         FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4465
4466         memset(async_events_bmap, 0, sizeof(async_events_bmap));
4467         for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4468                 u16 event_id = bnxt_async_events_arr[i];
4469
4470                 if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4471                     !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4472                         continue;
4473                 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4474         }
4475         if (bmap && bmap_size) {
4476                 for (i = 0; i < bmap_size; i++) {
4477                         if (test_bit(i, bmap))
4478                                 __set_bit(i, async_events_bmap);
4479                 }
4480         }
4481         for (i = 0; i < 8; i++)
4482                 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
4483
4484         if (async_only)
4485                 req.enables =
4486                         cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4487
4488         mutex_lock(&bp->hwrm_cmd_lock);
4489         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4490         if (!rc) {
4491                 set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4492                 if (resp->flags &
4493                     cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4494                         bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4495         }
4496         mutex_unlock(&bp->hwrm_cmd_lock);
4497         return rc;
4498 }
4499
4500 static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4501 {
4502         struct hwrm_func_drv_unrgtr_input req = {0};
4503
4504         if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4505                 return 0;
4506
4507         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4508         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4509 }
4510
4511 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4512 {
4513         u32 rc = 0;
4514         struct hwrm_tunnel_dst_port_free_input req = {0};
4515
4516         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4517         req.tunnel_type = tunnel_type;
4518
4519         switch (tunnel_type) {
4520         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4521                 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
4522                 break;
4523         case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4524                 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
4525                 break;
4526         default:
4527                 break;
4528         }
4529
4530         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4531         if (rc)
4532                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4533                            rc);
4534         return rc;
4535 }
4536
4537 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4538                                            u8 tunnel_type)
4539 {
4540         u32 rc = 0;
4541         struct hwrm_tunnel_dst_port_alloc_input req = {0};
4542         struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4543
4544         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4545
4546         req.tunnel_type = tunnel_type;
4547         req.tunnel_dst_port_val = port;
4548
4549         mutex_lock(&bp->hwrm_cmd_lock);
4550         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4551         if (rc) {
4552                 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4553                            rc);
4554                 goto err_out;
4555         }
4556
4557         switch (tunnel_type) {
4558         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4559                 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
4560                 break;
4561         case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4562                 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
4563                 break;
4564         default:
4565                 break;
4566         }
4567
4568 err_out:
4569         mutex_unlock(&bp->hwrm_cmd_lock);
4570         return rc;
4571 }
4572
4573 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4574 {
4575         struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4576         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4577
4578         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
4579         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4580
4581         req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4582         req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4583         req.mask = cpu_to_le32(vnic->rx_mask);
4584         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4585 }
4586
4587 #ifdef CONFIG_RFS_ACCEL
4588 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4589                                             struct bnxt_ntuple_filter *fltr)
4590 {
4591         struct hwrm_cfa_ntuple_filter_free_input req = {0};
4592
4593         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4594         req.ntuple_filter_id = fltr->filter_id;
4595         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4596 }
4597
4598 #define BNXT_NTP_FLTR_FLAGS                                     \
4599         (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |     \
4600          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |        \
4601          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |      \
4602          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |      \
4603          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |       \
4604          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
4605          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |       \
4606          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
4607          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |      \
4608          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |         \
4609          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
4610          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |         \
4611          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
4612          CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4613
4614 #define BNXT_NTP_TUNNEL_FLTR_FLAG                               \
4615                 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4616
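/* Allocate an n-tuple (RFS) filter in the CFA.  The destination is either
 * the RX ring index directly (when the firmware supports the ring table
 * index V2 mode) or the per-ring VNIC created for aRFS; the match fields
 * are taken from the dissected flow keys.
 */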
4617 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4618                                              struct bnxt_ntuple_filter *fltr)
4619 {
4620         struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4621         struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4622         struct flow_keys *keys = &fltr->fkeys;
4623         struct bnxt_vnic_info *vnic;
4624         u32 flags = 0;
4625         int rc = 0;
4626
4627         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
4628         req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4629
4630         if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4631                 flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4632                 req.dst_id = cpu_to_le16(fltr->rxq);
4633         } else {
4634                 vnic = &bp->vnic_info[fltr->rxq + 1];
4635                 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
4636         }
4637         req.flags = cpu_to_le32(flags);
4638         req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4639
4640         req.ethertype = htons(ETH_P_IP);
4641         memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4642         req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4643         req.ip_protocol = keys->basic.ip_proto;
4644
4645         if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4646                 int i;
4647
4648                 req.ethertype = htons(ETH_P_IPV6);
4649                 req.ip_addr_type =
4650                         CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4651                 *(struct in6_addr *)&req.src_ipaddr[0] =
4652                         keys->addrs.v6addrs.src;
4653                 *(struct in6_addr *)&req.dst_ipaddr[0] =
4654                         keys->addrs.v6addrs.dst;
4655                 for (i = 0; i < 4; i++) {
4656                         req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4657                         req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4658                 }
4659         } else {
4660                 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4661                 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4662                 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4663                 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4664         }
4665         if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4666                 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4667                 req.tunnel_type =
4668                         CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4669         }
4670
4671         req.src_port = keys->ports.src;
4672         req.src_port_mask = cpu_to_be16(0xffff);
4673         req.dst_port = keys->ports.dst;
4674         req.dst_port_mask = cpu_to_be16(0xffff);
4675
4676         mutex_lock(&bp->hwrm_cmd_lock);
4677         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4678         if (!rc) {
4679                 resp = bnxt_get_hwrm_resp_addr(bp, &req);
4680                 fltr->filter_id = resp->ntuple_filter_id;
4681         }
4682         mutex_unlock(&bp->hwrm_cmd_lock);
4683         return rc;
4684 }
4685 #endif
4686
4687 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4688                                      u8 *mac_addr)
4689 {
4690         u32 rc = 0;
4691         struct hwrm_cfa_l2_filter_alloc_input req = {0};
4692         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4693
4694         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
4695         req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4696         if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4697                 req.flags |=
4698                         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
4699         req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
4700         req.enables =
4701                 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
4702                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
4703                             CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4704         memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4705         req.l2_addr_mask[0] = 0xff;
4706         req.l2_addr_mask[1] = 0xff;
4707         req.l2_addr_mask[2] = 0xff;
4708         req.l2_addr_mask[3] = 0xff;
4709         req.l2_addr_mask[4] = 0xff;
4710         req.l2_addr_mask[5] = 0xff;
4711
4712         mutex_lock(&bp->hwrm_cmd_lock);
4713         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4714         if (!rc)
4715                 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4716                                                         resp->l2_filter_id;
4717         mutex_unlock(&bp->hwrm_cmd_lock);
4718         return rc;
4719 }
4720
4721 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4722 {
4723         u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4724         int rc = 0;
4725
4726         /* Any associated ntuple filters will also be cleared by firmware. */
4727         mutex_lock(&bp->hwrm_cmd_lock);
4728         for (i = 0; i < num_of_vnics; i++) {
4729                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4730
4731                 for (j = 0; j < vnic->uc_filter_count; j++) {
4732                         struct hwrm_cfa_l2_filter_free_input req = {0};
4733
4734                         bnxt_hwrm_cmd_hdr_init(bp, &req,
4735                                                HWRM_CFA_L2_FILTER_FREE, -1, -1);
4736
4737                         req.l2_filter_id = vnic->fw_l2_filter_id[j];
4738
4739                         rc = _hwrm_send_message(bp, &req, sizeof(req),
4740                                                 HWRM_CMD_TIMEOUT);
4741                 }
4742                 vnic->uc_filter_count = 0;
4743         }
4744         mutex_unlock(&bp->hwrm_cmd_lock);
4745
4746         return rc;
4747 }
4748
4749 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4750 {
4751         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4752         u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
4753         struct hwrm_vnic_tpa_cfg_input req = {0};
4754
4755         if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4756                 return 0;
4757
4758         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4759
4760         if (tpa_flags) {
4761                 u16 mss = bp->dev->mtu - 40;
4762                 u32 nsegs, n, segs = 0, flags;
4763
4764                 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4765                         VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4766                         VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4767                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4768                         VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4769                 if (tpa_flags & BNXT_FLAG_GRO)
4770                         flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4771
4772                 req.flags = cpu_to_le32(flags);
4773
4774                 req.enables =
4775                         cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
4776                                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4777                                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
4778
4779                 /* The number of segs is in log2 units, and the first packet
4780                  * is not counted as part of these units.
4781                  */
4782                 if (mss <= BNXT_RX_PAGE_SIZE) {
4783                         n = BNXT_RX_PAGE_SIZE / mss;
4784                         nsegs = (MAX_SKB_FRAGS - 1) * n;
4785                 } else {
4786                         n = mss / BNXT_RX_PAGE_SIZE;
4787                         if (mss & (BNXT_RX_PAGE_SIZE - 1))
4788                                 n++;
4789                         nsegs = (MAX_SKB_FRAGS - n) / n;
4790                 }
4791
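                /* Illustrative example (assumed values, not taken from this
                 * file): with an MTU of 1500, mss = 1460; if BNXT_RX_PAGE_SIZE
                 * is 4096 and MAX_SKB_FRAGS is 17, then n = 4096 / 1460 = 2
                 * and nsegs = (17 - 1) * 2 = 32, so on non-P5 chips
                 * segs = ilog2(32) = 5, i.e. up to 2^5 aggregated segments.
                 */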
4792                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4793                         segs = MAX_TPA_SEGS_P5;
4794                         max_aggs = bp->max_tpa;
4795                 } else {
4796                         segs = ilog2(nsegs);
4797                 }
4798                 req.max_agg_segs = cpu_to_le16(segs);
4799                 req.max_aggs = cpu_to_le16(max_aggs);
4800
4801                 req.min_agg_len = cpu_to_le32(512);
4802         }
4803         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4804
4805         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4806 }
4807
4808 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4809 {
4810         struct bnxt_ring_grp_info *grp_info;
4811
4812         grp_info = &bp->grp_info[ring->grp_idx];
4813         return grp_info->cp_fw_ring_id;
4814 }
4815
4816 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4817 {
4818         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4819                 struct bnxt_napi *bnapi = rxr->bnapi;
4820                 struct bnxt_cp_ring_info *cpr;
4821
4822                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4823                 return cpr->cp_ring_struct.fw_ring_id;
4824         } else {
4825                 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4826         }
4827 }
4828
4829 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4830 {
4831         if (bp->flags & BNXT_FLAG_CHIP_P5) {
4832                 struct bnxt_napi *bnapi = txr->bnapi;
4833                 struct bnxt_cp_ring_info *cpr;
4834
4835                 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4836                 return cpr->cp_ring_struct.fw_ring_id;
4837         } else {
4838                 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4839         }
4840 }
4841
4842 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4843 {
4844         u32 i, j, max_rings;
4845         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4846         struct hwrm_vnic_rss_cfg_input req = {0};
4847
4848         if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
4849             vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
4850                 return 0;
4851
4852         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4853         if (set_rss) {
4854                 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4855                 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4856                 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4857                         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4858                                 max_rings = bp->rx_nr_rings - 1;
4859                         else
4860                                 max_rings = bp->rx_nr_rings;
4861                 } else {
4862                         max_rings = 1;
4863                 }
4864
4865                 /* Fill the RSS indirection table with ring group ids */
4866                 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4867                         if (j == max_rings)
4868                                 j = 0;
4869                         vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4870                 }
4871
4872                 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4873                 req.hash_key_tbl_addr =
4874                         cpu_to_le64(vnic->rss_hash_key_dma_addr);
4875         }
4876         req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
4877         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4878 }
4879
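/* P5 variant of RSS configuration: the indirection table is programmed in
 * blocks of 64 (RX ring id, companion completion ring id) pairs, one
 * VNIC_RSS_CFG request per RSS context, cycling through the RX rings.
 */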
4880 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
4881 {
4882         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4883         u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
4884         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4885         struct hwrm_vnic_rss_cfg_input req = {0};
4886
4887         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4888         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4889         if (!set_rss) {
4890                 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4891                 return 0;
4892         }
4893         req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4894         req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4895         req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4896         req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
4897         nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
4898         for (i = 0, k = 0; i < nr_ctxs; i++) {
4899                 __le16 *ring_tbl = vnic->rss_table;
4900                 int rc;
4901
4902                 req.ring_table_pair_index = i;
4903                 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
4904                 for (j = 0; j < 64; j++) {
4905                         u16 ring_id;
4906
4907                         ring_id = rxr->rx_ring_struct.fw_ring_id;
4908                         *ring_tbl++ = cpu_to_le16(ring_id);
4909                         ring_id = bnxt_cp_ring_for_rx(bp, rxr);
4910                         *ring_tbl++ = cpu_to_le16(ring_id);
4911                         rxr++;
4912                         k++;
4913                         if (k == max_rings) {
4914                                 k = 0;
4915                                 rxr = &bp->rx_ring[0];
4916                         }
4917                 }
4918                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4919                 if (rc)
4920                         return rc;
4921         }
4922         return 0;
4923 }
4924
4925 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
4926 {
4927         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4928         struct hwrm_vnic_plcmodes_cfg_input req = {0};
4929
4930         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
4931         req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
4932                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
4933                                 VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
4934         req.enables =
4935                 cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
4936                             VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
4937         /* thresholds not implemented in firmware yet */
4938         req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
4939         req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
4940         req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4941         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4942 }
4943
4944 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
4945                                         u16 ctx_idx)
4946 {
4947         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
4948
4949         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
4950         req.rss_cos_lb_ctx_id =
4951                 cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
4952
4953         hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4954         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
4955 }
4956
4957 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
4958 {
4959         int i, j;
4960
4961         for (i = 0; i < bp->nr_vnics; i++) {
4962                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4963
4964                 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
4965                         if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
4966                                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
4967                 }
4968         }
4969         bp->rsscos_nr_ctxs = 0;
4970 }
4971
4972 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
4973 {
4974         int rc;
4975         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
4976         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
4977                                                 bp->hwrm_cmd_resp_addr;
4978
4979         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
4980                                -1);
4981
4982         mutex_lock(&bp->hwrm_cmd_lock);
4983         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4984         if (!rc)
4985                 bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
4986                         le16_to_cpu(resp->rss_cos_lb_ctx_id);
4987         mutex_unlock(&bp->hwrm_cmd_lock);
4988
4989         return rc;
4990 }
4991
4992 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
4993 {
4994         if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
4995                 return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
4996         return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
4997 }
4998
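/* Configure a VNIC.  On P5 chips only the default RX/completion ring and
 * the MRU are programmed; on earlier chips the default ring group, the
 * RSS/COS/LB context rules and VLAN stripping are set up as well.
 */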
4999 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5000 {
5001         unsigned int ring = 0, grp_idx;
5002         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5003         struct hwrm_vnic_cfg_input req = {0};
5004         u16 def_vlan = 0;
5005
5006         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
5007
5008         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5009                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5010
5011                 req.default_rx_ring_id =
5012                         cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5013                 req.default_cmpl_ring_id =
5014                         cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5015                 req.enables =
5016                         cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5017                                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5018                 goto vnic_mru;
5019         }
5020         req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5021         /* Only RSS is supported for now; TBD: COS & LB */
5022         if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5023                 req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5024                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5025                                            VNIC_CFG_REQ_ENABLES_MRU);
5026         } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5027                 req.rss_rule =
5028                         cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5029                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5030                                            VNIC_CFG_REQ_ENABLES_MRU);
5031                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5032         } else {
5033                 req.rss_rule = cpu_to_le16(0xffff);
5034         }
5035
5036         if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5037             (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5038                 req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5039                 req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5040         } else {
5041                 req.cos_rule = cpu_to_le16(0xffff);
5042         }
5043
5044         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5045                 ring = 0;
5046         else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5047                 ring = vnic_id - 1;
5048         else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5049                 ring = bp->rx_nr_rings - 1;
5050
5051         grp_idx = bp->rx_ring[ring].bnapi->index;
5052         req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5053         req.lb_rule = cpu_to_le16(0xffff);
5054 vnic_mru:
5055         req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
5056                               VLAN_HLEN);
5057
5058         req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5059 #ifdef CONFIG_BNXT_SRIOV
5060         if (BNXT_VF(bp))
5061                 def_vlan = bp->vf.vlan;
5062 #endif
5063         if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5064                 req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5065         if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5066                 req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5067
5068         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5069 }
5070
5071 static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5072 {
5073         u32 rc = 0;
5074
5075         if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5076                 struct hwrm_vnic_free_input req = {0};
5077
5078                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
5079                 req.vnic_id =
5080                         cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5081
5082                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5083                 bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5084         }
5085         return rc;
5086 }
5087
5088 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5089 {
5090         u16 i;
5091
5092         for (i = 0; i < bp->nr_vnics; i++)
5093                 bnxt_hwrm_vnic_free_one(bp, i);
5094 }
5095
5096 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5097                                 unsigned int start_rx_ring_idx,
5098                                 unsigned int nr_rings)
5099 {
5100         int rc = 0;
5101         unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5102         struct hwrm_vnic_alloc_input req = {0};
5103         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5104         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5105
5106         if (bp->flags & BNXT_FLAG_CHIP_P5)
5107                 goto vnic_no_ring_grps;
5108
5109         /* map ring groups to this vnic */
5110         for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5111                 grp_idx = bp->rx_ring[i].bnapi->index;
5112                 if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5113                         netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5114                                    j, nr_rings);
5115                         break;
5116                 }
5117                 vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5118         }
5119
5120 vnic_no_ring_grps:
5121         for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5122                 vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5123         if (vnic_id == 0)
5124                 req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5125
5126         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
5127
5128         mutex_lock(&bp->hwrm_cmd_lock);
5129         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5130         if (!rc)
5131                 vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5132         mutex_unlock(&bp->hwrm_cmd_lock);
5133         return rc;
5134 }
5135
5136 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5137 {
5138         struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
5139         struct hwrm_vnic_qcaps_input req = {0};
5140         int rc;
5141
5142         bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5143         bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5144         if (bp->hwrm_spec_code < 0x10600)
5145                 return 0;
5146
5147         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
5148         mutex_lock(&bp->hwrm_cmd_lock);
5149         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5150         if (!rc) {
5151                 u32 flags = le32_to_cpu(resp->flags);
5152
5153                 if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5154                     (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5155                         bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5156                 if (flags &
5157                     VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5158                         bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5159                 bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5160                 if (bp->max_tpa_v2)
5161                         bp->hw_ring_stats_size =
5162                                 sizeof(struct ctx_hw_stats_ext);
5163         }
5164         mutex_unlock(&bp->hwrm_cmd_lock);
5165         return rc;
5166 }
5167
5168 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5169 {
5170         u16 i;
5171         u32 rc = 0;
5172
5173         if (bp->flags & BNXT_FLAG_CHIP_P5)
5174                 return 0;
5175
5176         mutex_lock(&bp->hwrm_cmd_lock);
5177         for (i = 0; i < bp->rx_nr_rings; i++) {
5178                 struct hwrm_ring_grp_alloc_input req = {0};
5179                 struct hwrm_ring_grp_alloc_output *resp =
5180                                         bp->hwrm_cmd_resp_addr;
5181                 unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5182
5183                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
5184
5185                 req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5186                 req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5187                 req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5188                 req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5189
5190                 rc = _hwrm_send_message(bp, &req, sizeof(req),
5191                                         HWRM_CMD_TIMEOUT);
5192                 if (rc)
5193                         break;
5194
5195                 bp->grp_info[grp_idx].fw_grp_id =
5196                         le32_to_cpu(resp->ring_group_id);
5197         }
5198         mutex_unlock(&bp->hwrm_cmd_lock);
5199         return rc;
5200 }
5201
5202 static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5203 {
5204         u16 i;
5205         u32 rc = 0;
5206         struct hwrm_ring_grp_free_input req = {0};
5207
5208         if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5209                 return 0;
5210
5211         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
5212
5213         mutex_lock(&bp->hwrm_cmd_lock);
5214         for (i = 0; i < bp->cp_nr_rings; i++) {
5215                 if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5216                         continue;
5217                 req.ring_group_id =
5218                         cpu_to_le32(bp->grp_info[i].fw_grp_id);
5219
5220                 rc = _hwrm_send_message(bp, &req, sizeof(req),
5221                                         HWRM_CMD_TIMEOUT);
5222                 bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5223         }
5224         mutex_unlock(&bp->hwrm_cmd_lock);
5225         return rc;
5226 }
5227
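/* Common helper for allocating a firmware ring of any type: describe the
 * ring memory (single page or page table), fill in the type-specific
 * associations (completion ring, stats context, NQ, etc.) and store the
 * firmware ring id returned in the response.
 */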
5228 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5229                                     struct bnxt_ring_struct *ring,
5230                                     u32 ring_type, u32 map_index)
5231 {
5232         int rc = 0, err = 0;
5233         struct hwrm_ring_alloc_input req = {0};
5234         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5235         struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5236         struct bnxt_ring_grp_info *grp_info;
5237         u16 ring_id;
5238
5239         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
5240
5241         req.enables = 0;
5242         if (rmem->nr_pages > 1) {
5243                 req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5244                 /* Page size is in log2 units */
5245                 req.page_size = BNXT_PAGE_SHIFT;
5246                 req.page_tbl_depth = 1;
5247         } else {
5248                 req.page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
5249         }
5250         req.fbo = 0;
5251         /* Association of ring index with doorbell index and MSIX number */
5252         req.logical_id = cpu_to_le16(map_index);
5253
5254         switch (ring_type) {
5255         case HWRM_RING_ALLOC_TX: {
5256                 struct bnxt_tx_ring_info *txr;
5257
5258                 txr = container_of(ring, struct bnxt_tx_ring_info,
5259                                    tx_ring_struct);
5260                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5261                 /* Association of transmit ring with completion ring */
5262                 grp_info = &bp->grp_info[ring->grp_idx];
5263                 req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5264                 req.length = cpu_to_le32(bp->tx_ring_mask + 1);
5265                 req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5266                 req.queue_id = cpu_to_le16(ring->queue_id);
5267                 break;
5268         }
5269         case HWRM_RING_ALLOC_RX:
5270                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5271                 req.length = cpu_to_le32(bp->rx_ring_mask + 1);
5272                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5273                         u16 flags = 0;
5274
5275                         /* Association of rx ring with stats context */
5276                         grp_info = &bp->grp_info[ring->grp_idx];
5277                         req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5278                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5279                         req.enables |= cpu_to_le32(
5280                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5281                         if (NET_IP_ALIGN == 2)
5282                                 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5283                         req.flags = cpu_to_le16(flags);
5284                 }
5285                 break;
5286         case HWRM_RING_ALLOC_AGG:
5287                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5288                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5289                         /* Association of agg ring with rx ring */
5290                         grp_info = &bp->grp_info[ring->grp_idx];
5291                         req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5292                         req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5293                         req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5294                         req.enables |= cpu_to_le32(
5295                                 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5296                                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5297                 } else {
5298                         req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5299                 }
5300                 req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5301                 break;
5302         case HWRM_RING_ALLOC_CMPL:
5303                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5304                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5305                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5306                         /* Association of cp ring with nq */
5307                         grp_info = &bp->grp_info[map_index];
5308                         req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5309                         req.cq_handle = cpu_to_le64(ring->handle);
5310                         req.enables |= cpu_to_le32(
5311                                 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5312                 } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5313                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5314                 }
5315                 break;
5316         case HWRM_RING_ALLOC_NQ:
5317                 req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5318                 req.length = cpu_to_le32(bp->cp_ring_mask + 1);
5319                 if (bp->flags & BNXT_FLAG_USING_MSIX)
5320                         req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5321                 break;
5322         default:
5323                 netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5324                            ring_type);
5325                 return -1;
5326         }
5327
5328         mutex_lock(&bp->hwrm_cmd_lock);
5329         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5330         err = le16_to_cpu(resp->error_code);
5331         ring_id = le16_to_cpu(resp->ring_id);
5332         mutex_unlock(&bp->hwrm_cmd_lock);
5333
5334         if (rc || err) {
5335                 netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5336                            ring_type, rc, err);
5337                 return -EIO;
5338         }
5339         ring->fw_ring_id = ring_id;
5340         return rc;
5341 }
5342
5343 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5344 {
5345         int rc;
5346
5347         if (BNXT_PF(bp)) {
5348                 struct hwrm_func_cfg_input req = {0};
5349
5350                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
5351                 req.fid = cpu_to_le16(0xffff);
5352                 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5353                 req.async_event_cr = cpu_to_le16(idx);
5354                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5355         } else {
5356                 struct hwrm_func_vf_cfg_input req = {0};
5357
5358                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
5359                 req.enables =
5360                         cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5361                 req.async_event_cr = cpu_to_le16(idx);
5362                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5363         }
5364         return rc;
5365 }
5366
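/* Record the doorbell address and key for a newly allocated ring.  P5 chips
 * use a single 64-bit doorbell format with the path and type in the high
 * bits and the firmware ring id (xid) shifted in below; legacy chips use
 * 32-bit keys and a doorbell at a 0x80-byte stride per map index in BAR1.
 */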
5367 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5368                         u32 map_idx, u32 xid)
5369 {
5370         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5371                 if (BNXT_PF(bp))
5372                         db->doorbell = bp->bar1 + 0x10000;
5373                 else
5374                         db->doorbell = bp->bar1 + 0x4000;
5375                 switch (ring_type) {
5376                 case HWRM_RING_ALLOC_TX:
5377                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5378                         break;
5379                 case HWRM_RING_ALLOC_RX:
5380                 case HWRM_RING_ALLOC_AGG:
5381                         db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5382                         break;
5383                 case HWRM_RING_ALLOC_CMPL:
5384                         db->db_key64 = DBR_PATH_L2;
5385                         break;
5386                 case HWRM_RING_ALLOC_NQ:
5387                         db->db_key64 = DBR_PATH_L2;
5388                         break;
5389                 }
5390                 db->db_key64 |= (u64)xid << DBR_XID_SFT;
5391         } else {
5392                 db->doorbell = bp->bar1 + map_idx * 0x80;
5393                 switch (ring_type) {
5394                 case HWRM_RING_ALLOC_TX:
5395                         db->db_key32 = DB_KEY_TX;
5396                         break;
5397                 case HWRM_RING_ALLOC_RX:
5398                 case HWRM_RING_ALLOC_AGG:
5399                         db->db_key32 = DB_KEY_RX;
5400                         break;
5401                 case HWRM_RING_ALLOC_CMPL:
5402                         db->db_key32 = DB_KEY_CP;
5403                         break;
5404                 }
5405         }
5406 }
5407
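/* Allocate all firmware rings in dependency order: NQ/completion rings
 * first (the first one also registered as the async event completion ring),
 * then TX, RX and aggregation rings, programming each ring's doorbell as it
 * is created.
 */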
5408 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5409 {
5410         bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5411         int i, rc = 0;
5412         u32 type;
5413
5414         if (bp->flags & BNXT_FLAG_CHIP_P5)
5415                 type = HWRM_RING_ALLOC_NQ;
5416         else
5417                 type = HWRM_RING_ALLOC_CMPL;
5418         for (i = 0; i < bp->cp_nr_rings; i++) {
5419                 struct bnxt_napi *bnapi = bp->bnapi[i];
5420                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5421                 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5422                 u32 map_idx = ring->map_idx;
5423                 unsigned int vector;
5424
5425                 vector = bp->irq_tbl[map_idx].vector;
5426                 disable_irq_nosync(vector);
5427                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5428                 if (rc) {
5429                         enable_irq(vector);
5430                         goto err_out;
5431                 }
5432                 bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5433                 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5434                 enable_irq(vector);
5435                 bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5436
5437                 if (!i) {
5438                         rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5439                         if (rc)
5440                                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5441                 }
5442         }
5443
5444         type = HWRM_RING_ALLOC_TX;
5445         for (i = 0; i < bp->tx_nr_rings; i++) {
5446                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5447                 struct bnxt_ring_struct *ring;
5448                 u32 map_idx;
5449
5450                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5451                         struct bnxt_napi *bnapi = txr->bnapi;
5452                         struct bnxt_cp_ring_info *cpr, *cpr2;
5453                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5454
5455                         cpr = &bnapi->cp_ring;
5456                         cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5457                         ring = &cpr2->cp_ring_struct;
5458                         ring->handle = BNXT_TX_HDL;
5459                         map_idx = bnapi->index;
5460                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5461                         if (rc)
5462                                 goto err_out;
5463                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5464                                     ring->fw_ring_id);
5465                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5466                 }
5467                 ring = &txr->tx_ring_struct;
5468                 map_idx = i;
5469                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5470                 if (rc)
5471                         goto err_out;
5472                 bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5473         }
5474
5475         type = HWRM_RING_ALLOC_RX;
5476         for (i = 0; i < bp->rx_nr_rings; i++) {
5477                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5478                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5479                 struct bnxt_napi *bnapi = rxr->bnapi;
5480                 u32 map_idx = bnapi->index;
5481
5482                 rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5483                 if (rc)
5484                         goto err_out;
5485                 bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5486                 /* If we have agg rings, agg buffers must be posted first, so only write the RX doorbell here when there are no agg rings. */

5487                 if (!agg_rings)
5488                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5489                 bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5490                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5491                         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5492                         u32 type2 = HWRM_RING_ALLOC_CMPL;
5493                         struct bnxt_cp_ring_info *cpr2;
5494
5495                         cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5496                         ring = &cpr2->cp_ring_struct;
5497                         ring->handle = BNXT_RX_HDL;
5498                         rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5499                         if (rc)
5500                                 goto err_out;
5501                         bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5502                                     ring->fw_ring_id);
5503                         bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5504                 }
5505         }
5506
5507         if (agg_rings) {
5508                 type = HWRM_RING_ALLOC_AGG;
5509                 for (i = 0; i < bp->rx_nr_rings; i++) {
5510                         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5511                         struct bnxt_ring_struct *ring =
5512                                                 &rxr->rx_agg_ring_struct;
5513                         u32 grp_idx = ring->grp_idx;
5514                         u32 map_idx = grp_idx + bp->rx_nr_rings;
5515
5516                         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5517                         if (rc)
5518                                 goto err_out;
5519
5520                         bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5521                                     ring->fw_ring_id);
5522                         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5523                         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5524                         bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5525                 }
5526         }
5527 err_out:
5528         return rc;
5529 }
5530
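/* Send HWRM_RING_FREE for one ring.  Skipped when the firmware is in a
 * fatal error state.  Returns -EIO if either the HWRM call fails or the
 * firmware response carries a non-zero error code.
 */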
5531 static int hwrm_ring_free_send_msg(struct bnxt *bp,
5532                                    struct bnxt_ring_struct *ring,
5533                                    u32 ring_type, int cmpl_ring_id)
5534 {
5535         int rc;
5536         struct hwrm_ring_free_input req = {0};
5537         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
5538         u16 error_code;
5539
5540         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
5541                 return 0;
5542
5543         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
5544         req.ring_type = ring_type;
5545         req.ring_id = cpu_to_le16(ring->fw_ring_id);
5546
5547         mutex_lock(&bp->hwrm_cmd_lock);
5548         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5549         error_code = le16_to_cpu(resp->error_code);
5550         mutex_unlock(&bp->hwrm_cmd_lock);
5551
5552         if (rc || error_code) {
5553                 netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
5554                            ring_type, rc, error_code);
5555                 return -EIO;
5556         }
5557         return 0;
5558 }
5559
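/* Free all firmware rings in the reverse order of allocation: TX, RX and
 * aggregation rings first, then the completion/NQ rings.  Interrupts are
 * disabled before the completion rings are freed because the IRQ doorbells
 * stop working once those rings are gone.  When close_path is set, the
 * associated completion ring ID is passed in each RING_FREE request.
 */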
5560 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
5561 {
5562         u32 type;
5563         int i;
5564
5565         if (!bp->bnapi)
5566                 return;
5567
5568         for (i = 0; i < bp->tx_nr_rings; i++) {
5569                 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5570                 struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
5571
5572                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5573                         u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
5574
5575                         hwrm_ring_free_send_msg(bp, ring,
5576                                                 RING_FREE_REQ_RING_TYPE_TX,
5577                                                 close_path ? cmpl_ring_id :
5578                                                 INVALID_HW_RING_ID);
5579                         ring->fw_ring_id = INVALID_HW_RING_ID;
5580                 }
5581         }
5582
5583         for (i = 0; i < bp->rx_nr_rings; i++) {
5584                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5585                 struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5586                 u32 grp_idx = rxr->bnapi->index;
5587
5588                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5589                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5590
5591                         hwrm_ring_free_send_msg(bp, ring,
5592                                                 RING_FREE_REQ_RING_TYPE_RX,
5593                                                 close_path ? cmpl_ring_id :
5594                                                 INVALID_HW_RING_ID);
5595                         ring->fw_ring_id = INVALID_HW_RING_ID;
5596                         bp->grp_info[grp_idx].rx_fw_ring_id =
5597                                 INVALID_HW_RING_ID;
5598                 }
5599         }
5600
5601         if (bp->flags & BNXT_FLAG_CHIP_P5)
5602                 type = RING_FREE_REQ_RING_TYPE_RX_AGG;
5603         else
5604                 type = RING_FREE_REQ_RING_TYPE_RX;
5605         for (i = 0; i < bp->rx_nr_rings; i++) {
5606                 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5607                 struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
5608                 u32 grp_idx = rxr->bnapi->index;
5609
5610                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5611                         u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5612
5613                         hwrm_ring_free_send_msg(bp, ring, type,
5614                                                 close_path ? cmpl_ring_id :
5615                                                 INVALID_HW_RING_ID);
5616                         ring->fw_ring_id = INVALID_HW_RING_ID;
5617                         bp->grp_info[grp_idx].agg_fw_ring_id =
5618                                 INVALID_HW_RING_ID;
5619                 }
5620         }
5621
5622         /* The completion rings are about to be freed.  After that the
5623          * IRQ doorbell will not work anymore.  So we need to disable
5624          * IRQ here.
5625          */
5626         bnxt_disable_int_sync(bp);
5627
5628         if (bp->flags & BNXT_FLAG_CHIP_P5)
5629                 type = RING_FREE_REQ_RING_TYPE_NQ;
5630         else
5631                 type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
5632         for (i = 0; i < bp->cp_nr_rings; i++) {
5633                 struct bnxt_napi *bnapi = bp->bnapi[i];
5634                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5635                 struct bnxt_ring_struct *ring;
5636                 int j;
5637
5638                 for (j = 0; j < 2; j++) {
5639                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
5640
5641                         if (cpr2) {
5642                                 ring = &cpr2->cp_ring_struct;
5643                                 if (ring->fw_ring_id == INVALID_HW_RING_ID)
5644                                         continue;
5645                                 hwrm_ring_free_send_msg(bp, ring,
5646                                         RING_FREE_REQ_RING_TYPE_L2_CMPL,
5647                                         INVALID_HW_RING_ID);
5648                                 ring->fw_ring_id = INVALID_HW_RING_ID;
5649                         }
5650                 }
5651                 ring = &cpr->cp_ring_struct;
5652                 if (ring->fw_ring_id != INVALID_HW_RING_ID) {
5653                         hwrm_ring_free_send_msg(bp, ring, type,
5654                                                 INVALID_HW_RING_ID);
5655                         ring->fw_ring_id = INVALID_HW_RING_ID;
5656                         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
5657                 }
5658         }
5659 }
5660
5661 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
5662                            bool shared);
5663
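/* Query HWRM_FUNC_QCFG for the ring resources currently allocated to this
 * function and cache them in bp->hw_resc.  On P5 chips, if fewer completion
 * rings were granted than RX + TX rings, the RX/TX counts are trimmed to
 * fit and the ring group count tracks the RX rings.
 */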
5664 static int bnxt_hwrm_get_rings(struct bnxt *bp)
5665 {
5666         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5667         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5668         struct hwrm_func_qcfg_input req = {0};
5669         int rc;
5670
5671         if (bp->hwrm_spec_code < 0x10601)
5672                 return 0;
5673
5674         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5675         req.fid = cpu_to_le16(0xffff);
5676         mutex_lock(&bp->hwrm_cmd_lock);
5677         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5678         if (rc) {
5679                 mutex_unlock(&bp->hwrm_cmd_lock);
5680                 return rc;
5681         }
5682
5683         hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5684         if (BNXT_NEW_RM(bp)) {
5685                 u16 cp, stats;
5686
5687                 hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
5688                 hw_resc->resv_hw_ring_grps =
5689                         le32_to_cpu(resp->alloc_hw_ring_grps);
5690                 hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
5691                 cp = le16_to_cpu(resp->alloc_cmpl_rings);
5692                 stats = le16_to_cpu(resp->alloc_stat_ctx);
5693                 hw_resc->resv_irqs = cp;
5694                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5695                         int rx = hw_resc->resv_rx_rings;
5696                         int tx = hw_resc->resv_tx_rings;
5697
5698                         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5699                                 rx >>= 1;
5700                         if (cp < (rx + tx)) {
5701                                 bnxt_trim_rings(bp, &rx, &tx, cp, false);
5702                                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
5703                                         rx <<= 1;
5704                                 hw_resc->resv_rx_rings = rx;
5705                                 hw_resc->resv_tx_rings = tx;
5706                         }
5707                         hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
5708                         hw_resc->resv_hw_ring_grps = rx;
5709                 }
5710                 hw_resc->resv_cp_rings = cp;
5711                 hw_resc->resv_stat_ctxs = stats;
5712         }
5713         mutex_unlock(&bp->hwrm_cmd_lock);
5714         return 0;
5715 }
5716
5717 /* Caller must hold bp->hwrm_cmd_lock */
5718 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
5719 {
5720         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
5721         struct hwrm_func_qcfg_input req = {0};
5722         int rc;
5723
5724         if (bp->hwrm_spec_code < 0x10601)
5725                 return 0;
5726
5727         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5728         req.fid = cpu_to_le16(fid);
5729         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5730         if (!rc)
5731                 *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
5732
5733         return rc;
5734 }
5735
5736 static bool bnxt_rfs_supported(struct bnxt *bp);
5737
5738 static void
5739 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
5740                              int tx_rings, int rx_rings, int ring_grps,
5741                              int cp_rings, int stats, int vnics)
5742 {
5743         u32 enables = 0;
5744
5745         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
5746         req->fid = cpu_to_le16(0xffff);
5747         enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5748         req->num_tx_rings = cpu_to_le16(tx_rings);
5749         if (BNXT_NEW_RM(bp)) {
5750                 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
5751                 enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5752                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5753                         enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
5754                         enables |= tx_rings + ring_grps ?
5755                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5756                         enables |= rx_rings ?
5757                                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5758                 } else {
5759                         enables |= cp_rings ?
5760                                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5761                         enables |= ring_grps ?
5762                                    FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
5763                                    FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5764                 }
5765                 enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
5766
5767                 req->num_rx_rings = cpu_to_le16(rx_rings);
5768                 if (bp->flags & BNXT_FLAG_CHIP_P5) {
5769                         req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5770                         req->num_msix = cpu_to_le16(cp_rings);
5771                         req->num_rsscos_ctxs =
5772                                 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5773                 } else {
5774                         req->num_cmpl_rings = cpu_to_le16(cp_rings);
5775                         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5776                         req->num_rsscos_ctxs = cpu_to_le16(1);
5777                         if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
5778                             bnxt_rfs_supported(bp))
5779                                 req->num_rsscos_ctxs =
5780                                         cpu_to_le16(ring_grps + 1);
5781                 }
5782                 req->num_stat_ctxs = cpu_to_le16(stats);
5783                 req->num_vnics = cpu_to_le16(vnics);
5784         }
5785         req->enables = cpu_to_le32(enables);
5786 }
5787
5788 static void
5789 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
5790                              struct hwrm_func_vf_cfg_input *req, int tx_rings,
5791                              int rx_rings, int ring_grps, int cp_rings,
5792                              int stats, int vnics)
5793 {
5794         u32 enables = 0;
5795
5796         bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
5797         enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
5798         enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
5799                               FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
5800         enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
5801         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5802                 enables |= tx_rings + ring_grps ?
5803                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5804         } else {
5805                 enables |= cp_rings ?
5806                            FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
5807                 enables |= ring_grps ?
5808                            FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
5809         }
5810         enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
5811         enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
5812
5813         req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
5814         req->num_tx_rings = cpu_to_le16(tx_rings);
5815         req->num_rx_rings = cpu_to_le16(rx_rings);
5816         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5817                 req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
5818                 req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
5819         } else {
5820                 req->num_cmpl_rings = cpu_to_le16(cp_rings);
5821                 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
5822                 req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
5823         }
5824         req->num_stat_ctxs = cpu_to_le16(stats);
5825         req->num_vnics = cpu_to_le16(vnics);
5826
5827         req->enables = cpu_to_le32(enables);
5828 }
5829
5830 static int
5831 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5832                            int ring_grps, int cp_rings, int stats, int vnics)
5833 {
5834         struct hwrm_func_cfg_input req = {0};
5835         int rc;
5836
5837         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5838                                      cp_rings, stats, vnics);
5839         if (!req.enables)
5840                 return 0;
5841
5842         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5843         if (rc)
5844                 return rc;
5845
5846         if (bp->hwrm_spec_code < 0x10601)
5847                 bp->hw_resc.resv_tx_rings = tx_rings;
5848
5849         rc = bnxt_hwrm_get_rings(bp);
5850         return rc;
5851 }
5852
5853 static int
5854 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
5855                            int ring_grps, int cp_rings, int stats, int vnics)
5856 {
5857         struct hwrm_func_vf_cfg_input req = {0};
5858         int rc;
5859
5860         if (!BNXT_NEW_RM(bp)) {
5861                 bp->hw_resc.resv_tx_rings = tx_rings;
5862                 return 0;
5863         }
5864
5865         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
5866                                      cp_rings, stats, vnics);
5867         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5868         if (rc)
5869                 return rc;
5870
5871         rc = bnxt_hwrm_get_rings(bp);
5872         return rc;
5873 }
5874
5875 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
5876                                    int cp, int stat, int vnic)
5877 {
5878         if (BNXT_PF(bp))
5879                 return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
5880                                                   vnic);
5881         else
5882                 return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
5883                                                   vnic);
5884 }
5885
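/* Number of NQs (MSI-X vectors) in use: the driver's completion rings plus
 * any vectors set aside for the ULP (RDMA) driver.
 */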
5886 int bnxt_nq_rings_in_use(struct bnxt *bp)
5887 {
5888         int cp = bp->cp_nr_rings;
5889         int ulp_msix, ulp_base;
5890
5891         ulp_msix = bnxt_get_ulp_msix_num(bp);
5892         if (ulp_msix) {
5893                 ulp_base = bnxt_get_ulp_msix_base(bp);
5894                 cp += ulp_msix;
5895                 if ((ulp_base + ulp_msix) > cp)
5896                         cp = ulp_base + ulp_msix;
5897         }
5898         return cp;
5899 }
5900
5901 static int bnxt_cp_rings_in_use(struct bnxt *bp)
5902 {
5903         int cp;
5904
5905         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5906                 return bnxt_nq_rings_in_use(bp);
5907
5908         cp = bp->tx_nr_rings + bp->rx_nr_rings;
5909         return cp;
5910 }
5911
5912 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
5913 {
5914         int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
5915         int cp = bp->cp_nr_rings;
5916
5917         if (!ulp_stat)
5918                 return cp;
5919
5920         if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
5921                 return bnxt_get_ulp_msix_base(bp) + ulp_stat;
5922
5923         return cp + ulp_stat;
5924 }
5925
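/* Return true if the resources currently reserved in firmware no longer
 * match what the driver needs, i.e. a new reservation round is required.
 */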
5926 static bool bnxt_need_reserve_rings(struct bnxt *bp)
5927 {
5928         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5929         int cp = bnxt_cp_rings_in_use(bp);
5930         int nq = bnxt_nq_rings_in_use(bp);
5931         int rx = bp->rx_nr_rings, stat;
5932         int vnic = 1, grp = rx;
5933
5934         if (bp->hwrm_spec_code < 0x10601)
5935                 return false;
5936
5937         if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
5938                 return true;
5939
5940         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5941                 vnic = rx + 1;
5942         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5943                 rx <<= 1;
5944         stat = bnxt_get_func_stat_ctxs(bp);
5945         if (BNXT_NEW_RM(bp) &&
5946             (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
5947              hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
5948              (hw_resc->resv_hw_ring_grps != grp &&
5949               !(bp->flags & BNXT_FLAG_CHIP_P5))))
5950                 return true;
5951         if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
5952             hw_resc->resv_irqs != nq)
5953                 return true;
5954         return false;
5955 }
5956
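/* Reserve rings with firmware and then shrink the driver's ring counts to
 * whatever was actually granted.  If only one RX ring can be reserved, the
 * aggregation rings (and LRO) are dropped rather than failing, unless the
 * device is already up.
 */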
5957 static int __bnxt_reserve_rings(struct bnxt *bp)
5958 {
5959         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
5960         int cp = bnxt_nq_rings_in_use(bp);
5961         int tx = bp->tx_nr_rings;
5962         int rx = bp->rx_nr_rings;
5963         int grp, rx_rings, rc;
5964         int vnic = 1, stat;
5965         bool sh = false;
5966
5967         if (!bnxt_need_reserve_rings(bp))
5968                 return 0;
5969
5970         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
5971                 sh = true;
5972         if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
5973                 vnic = rx + 1;
5974         if (bp->flags & BNXT_FLAG_AGG_RINGS)
5975                 rx <<= 1;
5976         grp = bp->rx_nr_rings;
5977         stat = bnxt_get_func_stat_ctxs(bp);
5978
5979         rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
5980         if (rc)
5981                 return rc;
5982
5983         tx = hw_resc->resv_tx_rings;
5984         if (BNXT_NEW_RM(bp)) {
5985                 rx = hw_resc->resv_rx_rings;
5986                 cp = hw_resc->resv_irqs;
5987                 grp = hw_resc->resv_hw_ring_grps;
5988                 vnic = hw_resc->resv_vnics;
5989                 stat = hw_resc->resv_stat_ctxs;
5990         }
5991
5992         rx_rings = rx;
5993         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
5994                 if (rx >= 2) {
5995                         rx_rings = rx >> 1;
5996                 } else {
5997                         if (netif_running(bp->dev))
5998                                 return -ENOMEM;
5999
6000                         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6001                         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6002                         bp->dev->hw_features &= ~NETIF_F_LRO;
6003                         bp->dev->features &= ~NETIF_F_LRO;
6004                         bnxt_set_ring_params(bp);
6005                 }
6006         }
6007         rx_rings = min_t(int, rx_rings, grp);
6008         cp = min_t(int, cp, bp->cp_nr_rings);
6009         if (stat > bnxt_get_ulp_stat_ctxs(bp))
6010                 stat -= bnxt_get_ulp_stat_ctxs(bp);
6011         cp = min_t(int, cp, stat);
6012         rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6013         if (bp->flags & BNXT_FLAG_AGG_RINGS)
6014                 rx = rx_rings << 1;
6015         cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6016         bp->tx_nr_rings = tx;
6017         bp->rx_nr_rings = rx_rings;
6018         bp->cp_nr_rings = cp;
6019
6020         if (!tx || !rx || !cp || !grp || !vnic || !stat)
6021                 return -ENOMEM;
6022
6023         return rc;
6024 }
6025
6026 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6027                                     int ring_grps, int cp_rings, int stats,
6028                                     int vnics)
6029 {
6030         struct hwrm_func_vf_cfg_input req = {0};
6031         u32 flags;
6032         int rc;
6033
6034         if (!BNXT_NEW_RM(bp))
6035                 return 0;
6036
6037         __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6038                                      cp_rings, stats, vnics);
6039         flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6040                 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6041                 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6042                 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6043                 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6044                 FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6045         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6046                 flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6047
6048         req.flags = cpu_to_le32(flags);
6049         rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6050         return rc;
6051 }
6052
6053 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6054                                     int ring_grps, int cp_rings, int stats,
6055                                     int vnics)
6056 {
6057         struct hwrm_func_cfg_input req = {0};
6058         u32 flags;
6059         int rc;
6060
6061         __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
6062                                      cp_rings, stats, vnics);
6063         flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6064         if (BNXT_NEW_RM(bp)) {
6065                 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6066                          FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6067                          FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6068                          FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6069                 if (bp->flags & BNXT_FLAG_CHIP_P5)
6070                         flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6071                                  FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6072                 else
6073                         flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6074         }
6075
6076         req.flags = cpu_to_le32(flags);
6077         rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6078         return rc;
6079 }
6080
6081 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6082                                  int ring_grps, int cp_rings, int stats,
6083                                  int vnics)
6084 {
6085         if (bp->hwrm_spec_code < 0x10801)
6086                 return 0;
6087
6088         if (BNXT_PF(bp))
6089                 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6090                                                 ring_grps, cp_rings, stats,
6091                                                 vnics);
6092
6093         return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6094                                         cp_rings, stats, vnics);
6095 }
6096
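/* Fill bp->coal_cap with conservative legacy defaults, then query
 * HWRM_RING_AGGINT_QCAPS (HWRM spec 1.9.2 or later) to learn the real
 * interrupt coalescing limits and timer resolution supported by firmware.
 */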
6097 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6098 {
6099         struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6100         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6101         struct hwrm_ring_aggint_qcaps_input req = {0};
6102         int rc;
6103
6104         coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6105         coal_cap->num_cmpl_dma_aggr_max = 63;
6106         coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6107         coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6108         coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6109         coal_cap->int_lat_tmr_min_max = 65535;
6110         coal_cap->int_lat_tmr_max_max = 65535;
6111         coal_cap->num_cmpl_aggr_int_max = 65535;
6112         coal_cap->timer_units = 80;
6113
6114         if (bp->hwrm_spec_code < 0x10902)
6115                 return;
6116
6117         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
6118         mutex_lock(&bp->hwrm_cmd_lock);
6119         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6120         if (!rc) {
6121                 coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6122                 coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6123                 coal_cap->num_cmpl_dma_aggr_max =
6124                         le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6125                 coal_cap->num_cmpl_dma_aggr_during_int_max =
6126                         le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6127                 coal_cap->cmpl_aggr_dma_tmr_max =
6128                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6129                 coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6130                         le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6131                 coal_cap->int_lat_tmr_min_max =
6132                         le16_to_cpu(resp->int_lat_tmr_min_max);
6133                 coal_cap->int_lat_tmr_max_max =
6134                         le16_to_cpu(resp->int_lat_tmr_max_max);
6135                 coal_cap->num_cmpl_aggr_int_max =
6136                         le16_to_cpu(resp->num_cmpl_aggr_int_max);
6137                 coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6138         }
6139         mutex_unlock(&bp->hwrm_cmd_lock);
6140 }
6141
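/* Convert microseconds to device coalescing timer ticks.  timer_units is
 * the tick period in nanoseconds (80 ns by default), so for example a
 * 25 usec setting becomes 25 * 1000 / 80 = 312 ticks.
 */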
6142 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6143 {
6144         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6145
6146         return usec * 1000 / coal_cap->timer_units;
6147 }
6148
6149 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6150         struct bnxt_coal *hw_coal,
6151         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6152 {
6153         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6154         u32 cmpl_params = coal_cap->cmpl_params;
6155         u16 val, tmr, max, flags = 0;
6156
6157         max = hw_coal->bufs_per_record * 128;
6158         if (hw_coal->budget)
6159                 max = hw_coal->bufs_per_record * hw_coal->budget;
6160         max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6161
6162         val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6163         req->num_cmpl_aggr_int = cpu_to_le16(val);
6164
6165         val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6166         req->num_cmpl_dma_aggr = cpu_to_le16(val);
6167
6168         val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6169                       coal_cap->num_cmpl_dma_aggr_during_int_max);
6170         req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6171
6172         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6173         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6174         req->int_lat_tmr_max = cpu_to_le16(tmr);
6175
6176         /* min timer set to 1/2 of interrupt timer */
6177         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6178                 val = tmr / 2;
6179                 val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6180                 req->int_lat_tmr_min = cpu_to_le16(val);
6181                 req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6182         }
6183
6184         /* buf timer set to 1/4 of interrupt timer */
6185         val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6186         req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6187
6188         if (cmpl_params &
6189             RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6190                 tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6191                 val = clamp_t(u16, tmr, 1,
6192                               coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6193                 req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6194                 req->enables |=
6195                         cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6196         }
6197
6198         if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
6199                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
6200         if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6201             hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6202                 flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6203         req->flags = cpu_to_le16(flags);
6204         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6205 }
6206
6207 /* Caller holds bp->hwrm_cmd_lock */
6208 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6209                                    struct bnxt_coal *hw_coal)
6210 {
6211         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
6212         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6213         struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6214         u32 nq_params = coal_cap->nq_params;
6215         u16 tmr;
6216
6217         if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6218                 return 0;
6219
6220         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
6221                                -1, -1);
6222         req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6223         req.flags =
6224                 cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6225
6226         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6227         tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6228         req.int_lat_tmr_min = cpu_to_le16(tmr);
6229         req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6230         return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6231 }
6232
6233 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6234 {
6235         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
6236         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6237         struct bnxt_coal coal;
6238
6239         /* Tick values in micro seconds.
6240          * 1 coal_buf x bufs_per_record = 1 completion record.
6241          */
6242         memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6243
6244         coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6245         coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6246
6247         if (!bnapi->rx_ring)
6248                 return -ENODEV;
6249
6250         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6251                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6252
6253         bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
6254
6255         req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6256
6257         return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
6258                                  HWRM_CMD_TIMEOUT);
6259 }
6260
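/* Program interrupt coalescing for every completion ring.  RX and TX rings
 * use separate parameter sets; on P5 chips a NAPI that has both an RX and
 * a TX completion ring gets both configured, and the parent NQ's minimum
 * timer is set as well via __bnxt_hwrm_set_coal_nq().
 */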
6261 int bnxt_hwrm_set_coal(struct bnxt *bp)
6262 {
6263         int i, rc = 0;
6264         struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
6265                                                            req_tx = {0}, *req;
6266
6267         bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
6268                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6269         bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
6270                                HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
6271
6272         bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
6273         bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
6274
6275         mutex_lock(&bp->hwrm_cmd_lock);
6276         for (i = 0; i < bp->cp_nr_rings; i++) {
6277                 struct bnxt_napi *bnapi = bp->bnapi[i];
6278                 struct bnxt_coal *hw_coal;
6279                 u16 ring_id;
6280
6281                 req = &req_rx;
6282                 if (!bnapi->rx_ring) {
6283                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6284                         req = &req_tx;
6285                 } else {
6286                         ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6287                 }
6288                 req->ring_id = cpu_to_le16(ring_id);
6289
6290                 rc = _hwrm_send_message(bp, req, sizeof(*req),
6291                                         HWRM_CMD_TIMEOUT);
6292                 if (rc)
6293                         break;
6294
6295                 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6296                         continue;
6297
6298                 if (bnapi->rx_ring && bnapi->tx_ring) {
6299                         req = &req_tx;
6300                         ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6301                         req->ring_id = cpu_to_le16(ring_id);
6302                         rc = _hwrm_send_message(bp, req, sizeof(*req),
6303                                                 HWRM_CMD_TIMEOUT);
6304                         if (rc)
6305                                 break;
6306                 }
6307                 if (bnapi->rx_ring)
6308                         hw_coal = &bp->rx_coal;
6309                 else
6310                         hw_coal = &bp->tx_coal;
6311                 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6312         }
6313         mutex_unlock(&bp->hwrm_cmd_lock);
6314         return rc;
6315 }
6316
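/* Release every firmware statistics context that was allocated for the
 * completion rings; skipped entirely on Nitro A0 chips.
 */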
6317 static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6318 {
6319         int rc = 0, i;
6320         struct hwrm_stat_ctx_free_input req = {0};
6321
6322         if (!bp->bnapi)
6323                 return 0;
6324
6325         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6326                 return 0;
6327
6328         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
6329
6330         mutex_lock(&bp->hwrm_cmd_lock);
6331         for (i = 0; i < bp->cp_nr_rings; i++) {
6332                 struct bnxt_napi *bnapi = bp->bnapi[i];
6333                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6334
6335                 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6336                         req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6337
6338                         rc = _hwrm_send_message(bp, &req, sizeof(req),
6339                                                 HWRM_CMD_TIMEOUT);
6340
6341                         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6342                 }
6343         }
6344         mutex_unlock(&bp->hwrm_cmd_lock);
6345         return rc;
6346 }
6347
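/* Allocate one firmware statistics context per completion ring and record
 * its ID in both the ring and the ring group info.  The DMA address of the
 * ring's statistics block and the update period (derived from
 * stats_coal_ticks) are passed to firmware.
 */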
6348 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6349 {
6350         int rc = 0, i;
6351         struct hwrm_stat_ctx_alloc_input req = {0};
6352         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
6353
6354         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6355                 return 0;
6356
6357         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
6358
6359         req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6360         req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6361
6362         mutex_lock(&bp->hwrm_cmd_lock);
6363         for (i = 0; i < bp->cp_nr_rings; i++) {
6364                 struct bnxt_napi *bnapi = bp->bnapi[i];
6365                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6366
6367                 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
6368
6369                 rc = _hwrm_send_message(bp, &req, sizeof(req),
6370                                         HWRM_CMD_TIMEOUT);
6371                 if (rc)
6372                         break;
6373
6374                 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6375
6376                 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6377         }
6378         mutex_unlock(&bp->hwrm_cmd_lock);
6379         return rc;
6380 }
6381
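/* Query HWRM_FUNC_QCFG for this function and cache the interesting bits:
 * the VF VLAN or the PF's registered VF count, LLDP/DCBX agent capability,
 * multi-host flag, NPAR partition type, bridge (EVB) mode and the maximum
 * configured MTU.
 */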
6382 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6383 {
6384         struct hwrm_func_qcfg_input req = {0};
6385         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
6386         u16 flags;
6387         int rc;
6388
6389         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
6390         req.fid = cpu_to_le16(0xffff);
6391         mutex_lock(&bp->hwrm_cmd_lock);
6392         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6393         if (rc)
6394                 goto func_qcfg_exit;
6395
6396 #ifdef CONFIG_BNXT_SRIOV
6397         if (BNXT_VF(bp)) {
6398                 struct bnxt_vf_info *vf = &bp->vf;
6399
6400                 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6401         } else {
6402                 bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6403         }
6404 #endif
6405         flags = le16_to_cpu(resp->flags);
6406         if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6407                      FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6408                 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6409                 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6410                         bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6411         }
6412         if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6413                 bp->flags |= BNXT_FLAG_MULTI_HOST;
6414
6415         switch (resp->port_partition_type) {
6416         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6417         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6418         case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6419                 bp->port_partition_type = resp->port_partition_type;
6420                 break;
6421         }
6422         if (bp->hwrm_spec_code < 0x10707 ||
6423             resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6424                 bp->br_mode = BRIDGE_MODE_VEB;
6425         else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6426                 bp->br_mode = BRIDGE_MODE_VEPA;
6427         else
6428                 bp->br_mode = BRIDGE_MODE_UNDEF;
6429
6430         bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6431         if (!bp->max_mtu)
6432                 bp->max_mtu = BNXT_MAX_MTU;
6433
6434 func_qcfg_exit:
6435         mutex_unlock(&bp->hwrm_cmd_lock);
6436         return rc;
6437 }
6438
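/* On newer devices (HWRM spec 1.9.2+, PF only) query how much context
 * (backing store) memory the firmware needs the host to provide, and
 * allocate bp->ctx to hold the per-type entry counts and sizes.
 */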
6439 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
6440 {
6441         struct hwrm_func_backing_store_qcaps_input req = {0};
6442         struct hwrm_func_backing_store_qcaps_output *resp =
6443                 bp->hwrm_cmd_resp_addr;
6444         int rc;
6445
6446         if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
6447                 return 0;
6448
6449         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
6450         mutex_lock(&bp->hwrm_cmd_lock);
6451         rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6452         if (!rc) {
6453                 struct bnxt_ctx_pg_info *ctx_pg;
6454                 struct bnxt_ctx_mem_info *ctx;
6455                 int i;
6456
6457                 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
6458                 if (!ctx) {
6459                         rc = -ENOMEM;
6460                         goto ctx_err;
6461                 }
6462                 ctx_pg = kzalloc(sizeof(*ctx_pg) * (bp->max_q + 1), GFP_KERNEL);
6463                 if (!ctx_pg) {
6464                         kfree(ctx);
6465                         rc = -ENOMEM;
6466                         goto ctx_err;
6467                 }
6468                 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
6469                         ctx->tqm_mem[i] = ctx_pg;
6470
6471                 bp->ctx = ctx;
6472                 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
6473                 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
6474                 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
6475                 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
6476                 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
6477                 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
6478                 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
6479                 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
6480                 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
6481                 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
6482                 ctx->vnic_max_vnic_entries =
6483                         le16_to_cpu(resp->vnic_max_vnic_entries);
6484                 ctx->vnic_max_ring_table_entries =
6485                         le16_to_cpu(resp->vnic_max_ring_table_entries);
6486                 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
6487                 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
6488                 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
6489                 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
6490                 ctx->tqm_min_entries_per_ring =
6491                         le32_to_cpu(resp->tqm_min_entries_per_ring);
6492                 ctx->tqm_max_entries_per_ring =
6493                         le32_to_cpu(resp->tqm_max_entries_per_ring);
6494                 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
6495                 if (!ctx->tqm_entries_multiple)
6496                         ctx->tqm_entries_multiple = 1;
6497                 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
6498                 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
6499                 ctx->mrav_num_entries_units =
6500                         le16_to_cpu(resp->mrav_num_entries_units);
6501                 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
6502                 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
6503                 ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
6504         } else {
6505                 rc = 0;
6506         }
6507 ctx_err:
6508         mutex_unlock(&bp->hwrm_cmd_lock);
6509         return rc;
6510 }
6511
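/* Encode the backing store page size and indirection level for firmware.
 * The upper nibble of *pg_attr selects the page size (0 = 4K, 1 = 8K,
 * 2 = 64K) and the low bits the page table depth (0, 1 or 2 levels);
 * *pg_dir points at the top-level page table, or directly at the single
 * data page when there is no indirection.
 */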
6512 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
6513                                   __le64 *pg_dir)
6514 {
6515         u8 pg_size = 0;
6516
6517         if (BNXT_PAGE_SHIFT == 13)
6518                 pg_size = 1 << 4;
6519         else if (BNXT_PAGE_SHIFT == 16)
6520                 pg_size = 2 << 4;
6521
6522         *pg_attr = pg_size;
6523         if (rmem->depth >= 1) {
6524                 if (rmem->depth == 2)
6525                         *pg_attr |= 2;
6526                 else
6527                         *pg_attr |= 1;
6528                 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
6529         } else {
6530                 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
6531         }
6532 }
6533
6534 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES                 \
6535         (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |                \
6536          FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |               \
6537          FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |                \
6538          FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |              \
6539          FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
6540
6541 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
6542 {
6543         struct hwrm_func_backing_store_cfg_input req = {0};
6544         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6545         struct bnxt_ctx_pg_info *ctx_pg;
6546         __le32 *num_entries;
6547         __le64 *pg_dir;
6548         u32 flags = 0;
6549         u8 *pg_attr;
6550         int i, rc;
6551         u32 ena;
6552
6553         if (!ctx)
6554                 return 0;
6555
6556         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
6557         req.enables = cpu_to_le32(enables);
6558
6559         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
6560                 ctx_pg = &ctx->qp_mem;
6561                 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
6562                 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
6563                 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
6564                 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
6565                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6566                                       &req.qpc_pg_size_qpc_lvl,
6567                                       &req.qpc_page_dir);
6568         }
6569         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
6570                 ctx_pg = &ctx->srq_mem;
6571                 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
6572                 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
6573                 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6574                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6575                                       &req.srq_pg_size_srq_lvl,
6576                                       &req.srq_page_dir);
6577         }
6578         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6579                 ctx_pg = &ctx->cq_mem;
6580                 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6581                 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6582                 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6583                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6584                                       &req.cq_page_dir);
6585         }
6586         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6587                 ctx_pg = &ctx->vnic_mem;
6588                 req.vnic_num_vnic_entries =
6589                         cpu_to_le16(ctx->vnic_max_vnic_entries);
6590                 req.vnic_num_ring_table_entries =
6591                         cpu_to_le16(ctx->vnic_max_ring_table_entries);
6592                 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6593                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6594                                       &req.vnic_pg_size_vnic_lvl,
6595                                       &req.vnic_page_dir);
6596         }
6597         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6598                 ctx_pg = &ctx->stat_mem;
6599                 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6600                 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6601                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6602                                       &req.stat_pg_size_stat_lvl,
6603                                       &req.stat_page_dir);
6604         }
6605         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
6606                 ctx_pg = &ctx->mrav_mem;
6607                 req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
6608                 if (ctx->mrav_num_entries_units)
6609                         flags |=
6610                         FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
6611                 req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
6612                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6613                                       &req.mrav_pg_size_mrav_lvl,
6614                                       &req.mrav_page_dir);
6615         }
6616         if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
6617                 ctx_pg = &ctx->tim_mem;
6618                 req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
6619                 req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
6620                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6621                                       &req.tim_pg_size_tim_lvl,
6622                                       &req.tim_page_dir);
6623         }
6624         for (i = 0, num_entries = &req.tqm_sp_num_entries,
6625              pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6626              pg_dir = &req.tqm_sp_page_dir,
6627              ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6628              i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6629                 if (!(enables & ena))
6630                         continue;
6631
6632                 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6633                 ctx_pg = ctx->tqm_mem[i];
6634                 *num_entries = cpu_to_le32(ctx_pg->entries);
6635                 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6636         }
6637         req.flags = cpu_to_le32(flags);
6638         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6639         return rc;
6640 }
6641
6642 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
6643                                   struct bnxt_ctx_pg_info *ctx_pg)
6644 {
6645         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6646
6647         rmem->page_size = BNXT_PAGE_SIZE;
6648         rmem->pg_arr = ctx_pg->ctx_pg_arr;
6649         rmem->dma_arr = ctx_pg->ctx_dma_arr;
6650         rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
6651         if (rmem->depth >= 1)
6652                 rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
6653         return bnxt_alloc_ring(bp, rmem);
6654 }
6655
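/* Allocate mem_size bytes of context backing store.  Small areas fit in a
 * flat page array; anything larger than MAX_CTX_PAGES pages (or when a
 * deeper table is requested) uses a two-level page table, with the last
 * table possibly only partially filled.  When use_init_val is set, the
 * firmware-provided ctx_kind_initializer is used as the initial fill value
 * for the pages.
 */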
6656 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
6657                                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
6658                                   u8 depth, bool use_init_val)
6659 {
6660         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6661         int rc;
6662
6663         if (!mem_size)
6664                 return 0;
6665
6666         ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6667         if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
6668                 ctx_pg->nr_pages = 0;
6669                 return -EINVAL;
6670         }
6671         if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
6672                 int nr_tbls, i;
6673
6674                 rmem->depth = 2;
6675                 ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
6676                                              GFP_KERNEL);
6677                 if (!ctx_pg->ctx_pg_tbl)
6678                         return -ENOMEM;
6679                 nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
6680                 rmem->nr_pages = nr_tbls;
6681                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6682                 if (rc)
6683                         return rc;
6684                 for (i = 0; i < nr_tbls; i++) {
6685                         struct bnxt_ctx_pg_info *pg_tbl;
6686
6687                         pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
6688                         if (!pg_tbl)
6689                                 return -ENOMEM;
6690                         ctx_pg->ctx_pg_tbl[i] = pg_tbl;
6691                         rmem = &pg_tbl->ring_mem;
6692                         rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
6693                         rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
6694                         rmem->depth = 1;
6695                         rmem->nr_pages = MAX_CTX_PAGES;
6696                         if (use_init_val)
6697                                 rmem->init_val = bp->ctx->ctx_kind_initializer;
6698                         if (i == (nr_tbls - 1)) {
6699                                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
6700
6701                                 if (rem)
6702                                         rmem->nr_pages = rem;
6703                         }
6704                         rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
6705                         if (rc)
6706                                 break;
6707                 }
6708         } else {
6709                 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6710                 if (rmem->nr_pages > 1 || depth)
6711                         rmem->depth = 1;
6712                 if (use_init_val)
6713                         rmem->init_val = bp->ctx->ctx_kind_initializer;
6714                 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
6715         }
6716         return rc;
6717 }
6718
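/* Free one context backing store, tearing down the per-table allocations
 * first when the region was built with an extra level of indirection.
 */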
6719 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
6720                                   struct bnxt_ctx_pg_info *ctx_pg)
6721 {
6722         struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6723
6724         if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
6725             ctx_pg->ctx_pg_tbl) {
6726                 int i, nr_tbls = rmem->nr_pages;
6727
6728                 for (i = 0; i < nr_tbls; i++) {
6729                         struct bnxt_ctx_pg_info *pg_tbl;
6730                         struct bnxt_ring_mem_info *rmem2;
6731
6732                         pg_tbl = ctx_pg->ctx_pg_tbl[i];
6733                         if (!pg_tbl)
6734                                 continue;
6735                         rmem2 = &pg_tbl->ring_mem;
6736                         bnxt_free_ring(bp, rmem2);
6737                         ctx_pg->ctx_pg_arr[i] = NULL;
6738                         kfree(pg_tbl);
6739                         ctx_pg->ctx_pg_tbl[i] = NULL;
6740                 }
6741                 kfree(ctx_pg->ctx_pg_tbl);
6742                 ctx_pg->ctx_pg_tbl = NULL;
6743         }
6744         bnxt_free_ring(bp, rmem);
6745         ctx_pg->nr_pages = 0;
6746 }
6747
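/* Free all firmware backing store regions (TQM, TIM, MRAV, stats, VNIC,
 * CQ, SRQ and QP) and clear the initialized flag.
 */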
6748 static void bnxt_free_ctx_mem(struct bnxt *bp)
6749 {
6750         struct bnxt_ctx_mem_info *ctx = bp->ctx;
6751         int i;
6752
6753         if (!ctx)
6754                 return;
6755
6756         if (ctx->tqm_mem[0]) {
6757                 for (i = 0; i < bp->max_q + 1; i++)
6758                         bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
6759                 kfree(ctx->tqm_mem[0]);
6760                 ctx->tqm_mem[0] = NULL;
6761         }
6762
6763         bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
6764         bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
6765         bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
6766         bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
6767         bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
6768         bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
6769         bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
6770         ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6771 }
6772
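/* Query firmware's backing store requirements, size each context region
 * (QP, SRQ, CQ, VNIC and stats, plus MR/AV and timer contexts when RoCE is
 * supported, with extra QP/SRQ headroom outside of kdump), allocate the
 * memory and report it to firmware via FUNC_BACKING_STORE_CFG.
 */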
6773 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6774 {
6775         struct bnxt_ctx_pg_info *ctx_pg;
6776         struct bnxt_ctx_mem_info *ctx;
6777         u32 mem_size, ena, entries;
6778         u32 num_mr, num_ah;
6779         u32 extra_srqs = 0;
6780         u32 extra_qps = 0;
6781         u8 pg_lvl = 1;
6782         int i, rc;
6783
6784         rc = bnxt_hwrm_func_backing_store_qcaps(bp);
6785         if (rc) {
6786                 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
6787                            rc);
6788                 return rc;
6789         }
6790         ctx = bp->ctx;
6791         if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
6792                 return 0;
6793
6794         if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
6795                 pg_lvl = 2;
6796                 extra_qps = 65536;
6797                 extra_srqs = 8192;
6798         }
6799
6800         ctx_pg = &ctx->qp_mem;
6801         ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
6802                           extra_qps;
6803         mem_size = ctx->qp_entry_size * ctx_pg->entries;
6804         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
6805         if (rc)
6806                 return rc;
6807
6808         ctx_pg = &ctx->srq_mem;
6809         ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
6810         mem_size = ctx->srq_entry_size * ctx_pg->entries;
6811         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
6812         if (rc)
6813                 return rc;
6814
6815         ctx_pg = &ctx->cq_mem;
6816         ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
6817         mem_size = ctx->cq_entry_size * ctx_pg->entries;
6818         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
6819         if (rc)
6820                 return rc;
6821
6822         ctx_pg = &ctx->vnic_mem;
6823         ctx_pg->entries = ctx->vnic_max_vnic_entries +
6824                           ctx->vnic_max_ring_table_entries;
6825         mem_size = ctx->vnic_entry_size * ctx_pg->entries;
6826         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
6827         if (rc)
6828                 return rc;
6829
6830         ctx_pg = &ctx->stat_mem;
6831         ctx_pg->entries = ctx->stat_max_entries;
6832         mem_size = ctx->stat_entry_size * ctx_pg->entries;
6833         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
6834         if (rc)
6835                 return rc;
6836
6837         ena = 0;
6838         if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
6839                 goto skip_rdma;
6840
6841         ctx_pg = &ctx->mrav_mem;
6842         /* 128K extra is needed to accommodate static AH context
6843          * allocation by f/w.
6844          */
6845         num_mr = 1024 * 256;
6846         num_ah = 1024 * 128;
6847         ctx_pg->entries = num_mr + num_ah;
6848         mem_size = ctx->mrav_entry_size * ctx_pg->entries;
6849         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
6850         if (rc)
6851                 return rc;
6852         ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
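        /* When firmware reports mrav_num_entries_units, encode the counts
         * in units: MR count in the upper 16 bits, AH count in the lower
         * 16 bits.
         */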
6853         if (ctx->mrav_num_entries_units)
6854                 ctx_pg->entries =
6855                         ((num_mr / ctx->mrav_num_entries_units) << 16) |
6856                          (num_ah / ctx->mrav_num_entries_units);
6857
6858         ctx_pg = &ctx->tim_mem;
6859         ctx_pg->entries = ctx->qp_mem.entries;
6860         mem_size = ctx->tim_entry_size * ctx_pg->entries;
6861         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
6862         if (rc)
6863                 return rc;
6864         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
6865
6866 skip_rdma:
6867         entries = ctx->qp_max_l2_entries + extra_qps;
6868         entries = roundup(entries, ctx->tqm_entries_multiple);
6869         entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
6870                           ctx->tqm_max_entries_per_ring);
6871         for (i = 0; i < bp->max_q + 1; i++) {
6872                 ctx_pg = ctx->tqm_mem[i];
6873                 ctx_pg->entries = entries;
6874                 mem_size = ctx->tqm_entry_size * entries;
6875                 rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
6876                 if (rc)
6877                         return rc;
6878                 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
6879         }
6880         ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
6881         rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
6882         if (rc)
6883                 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
6884                            rc);
6885         else
6886                 ctx->flags |= BNXT_CTX_FLAG_INITED;
6887
6888         return 0;
6889 }
6890
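/* Query min/max resource limits from firmware.  Only max_tx_sch_inputs is
 * cached unless @all is set; on P5 chips the NQ limit follows max_msix and
 * ring groups track the RX ring limit.  The PF also learns the VF resource
 * reservation strategy.
 */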
6891 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
6892 {
6893         struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6894         struct hwrm_func_resource_qcaps_input req = {0};
6895         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6896         int rc;
6897
6898         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
6899         req.fid = cpu_to_le16(0xffff);
6900
6901         mutex_lock(&bp->hwrm_cmd_lock);
6902         rc = _hwrm_send_message_silent(bp, &req, sizeof(req),
6903                                        HWRM_CMD_TIMEOUT);
6904         if (rc)
6905                 goto hwrm_func_resc_qcaps_exit;
6906
6907         hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
6908         if (!all)
6909                 goto hwrm_func_resc_qcaps_exit;
6910
6911         hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
6912         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6913         hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
6914         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6915         hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
6916         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6917         hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
6918         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6919         hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
6920         hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
6921         hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
6922         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6923         hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
6924         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6925         hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
6926         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6927
6928         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6929                 u16 max_msix = le16_to_cpu(resp->max_msix);
6930
6931                 hw_resc->max_nqs = max_msix;
6932                 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
6933         }
6934
6935         if (BNXT_PF(bp)) {
6936                 struct bnxt_pf_info *pf = &bp->pf;
6937
6938                 pf->vf_resv_strategy =
6939                         le16_to_cpu(resp->vf_reservation_strategy);
6940                 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
6941                         pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
6942         }
6943 hwrm_func_resc_qcaps_exit:
6944         mutex_unlock(&bp->hwrm_cmd_lock);
6945         return rc;
6946 }
6947
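/* Query function capabilities: RoCE, WoL, PCIe/extended stats and error
 * recovery flags, the TX push threshold, resource maximums, and the PF or
 * VF identity (FID, port, MAC address, VF and flow limits).
 */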
6948 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
6949 {
6950         int rc = 0;
6951         struct hwrm_func_qcaps_input req = {0};
6952         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6953         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6954         u32 flags;
6955
6956         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
6957         req.fid = cpu_to_le16(0xffff);
6958
6959         mutex_lock(&bp->hwrm_cmd_lock);
6960         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6961         if (rc)
6962                 goto hwrm_func_qcaps_exit;
6963
6964         flags = le32_to_cpu(resp->flags);
6965         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
6966                 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
6967         if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
6968                 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
6969         if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
6970                 bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
6971         if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
6972                 bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
6973         if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
6974                 bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
6975         if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
6976                 bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
6977         if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
6978                 bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
6979
6980         bp->tx_push_thresh = 0;
6981         if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
6982                 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
6983
6984         hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6985         hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6986         hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6987         hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6988         hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
6989         if (!hw_resc->max_hw_ring_grps)
6990                 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
6991         hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6992         hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6993         hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6994
6995         if (BNXT_PF(bp)) {
6996                 struct bnxt_pf_info *pf = &bp->pf;
6997
6998                 pf->fw_fid = le16_to_cpu(resp->fid);
6999                 pf->port_id = le16_to_cpu(resp->port_id);
7000                 bp->dev->dev_port = pf->port_id;
7001                 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7002                 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7003                 pf->max_vfs = le16_to_cpu(resp->max_vfs);
7004                 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7005                 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7006                 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7007                 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7008                 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7009                 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7010                 bp->flags &= ~BNXT_FLAG_WOL_CAP;
7011                 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7012                         bp->flags |= BNXT_FLAG_WOL_CAP;
7013         } else {
7014 #ifdef CONFIG_BNXT_SRIOV
7015                 struct bnxt_vf_info *vf = &bp->vf;
7016
7017                 vf->fw_fid = le16_to_cpu(resp->fid);
7018                 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7019 #endif
7020         }
7021
7022 hwrm_func_qcaps_exit:
7023         mutex_unlock(&bp->hwrm_cmd_lock);
7024         return rc;
7025 }
7026
7027 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7028
7029 static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7030 {
7031         int rc;
7032
7033         rc = __bnxt_hwrm_func_qcaps(bp);
7034         if (rc)
7035                 return rc;
7036         rc = bnxt_hwrm_queue_qportcfg(bp);
7037         if (rc) {
7038                 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7039                 return rc;
7040         }
7041         if (bp->hwrm_spec_code >= 0x10803) {
7042                 rc = bnxt_alloc_ctx_mem(bp);
7043                 if (rc)
7044                         return rc;
7045                 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7046                 if (!rc)
7047                         bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7048         }
7049         return 0;
7050 }
7051
7052 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7053 {
7054         struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0};
7055         struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7056         int rc = 0;
7057         u32 flags;
7058
7059         if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7060                 return 0;
7061
7062         resp = bp->hwrm_cmd_resp_addr;
7063         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1);
7064
7065         mutex_lock(&bp->hwrm_cmd_lock);
7066         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7067         if (rc)
7068                 goto hwrm_cfa_adv_qcaps_exit;
7069
7070         flags = le32_to_cpu(resp->flags);
7071         if (flags &
7072             CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7073                 bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7074
7075 hwrm_cfa_adv_qcaps_exit:
7076         mutex_unlock(&bp->hwrm_cmd_lock);
7077         return rc;
7078 }
7079
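/* Pre-map the GRC registers used for firmware health monitoring through a
 * dedicated BAR0 window so they can be read at poll time without
 * reprogramming the window.  All monitored GRC registers must fall within
 * the same window base, otherwise mapping fails with -ERANGE.
 */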
7080 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7081 {
7082         struct bnxt_fw_health *fw_health = bp->fw_health;
7083         u32 reg_base = 0xffffffff;
7084         int i;
7085
7086         /* Only pre-map the monitoring GRC registers using window 3 */
7087         for (i = 0; i < 4; i++) {
7088                 u32 reg = fw_health->regs[i];
7089
7090                 if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7091                         continue;
7092                 if (reg_base == 0xffffffff)
7093                         reg_base = reg & BNXT_GRC_BASE_MASK;
7094                 if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7095                         return -ERANGE;
7096                 fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
7097                                             (reg & BNXT_GRC_OFFSET_MASK);
7098         }
7099         if (reg_base == 0xffffffff)
7100                 return 0;
7101
7102         writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7103                          BNXT_FW_HEALTH_WIN_MAP_OFF);
7104         return 0;
7105 }
7106
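/* Read the firmware error recovery policy: polling and wait intervals,
 * the health/heartbeat/reset-count register locations and the reset
 * sequence, then map the health registers.  The error recovery capability
 * is dropped on any failure.
 */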
7107 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7108 {
7109         struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7110         struct bnxt_fw_health *fw_health = bp->fw_health;
7111         struct hwrm_error_recovery_qcfg_input req = {0};
7112         int rc, i;
7113
7114         if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7115                 return 0;
7116
7117         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1);
7118         mutex_lock(&bp->hwrm_cmd_lock);
7119         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7120         if (rc)
7121                 goto err_recovery_out;
7122         fw_health->flags = le32_to_cpu(resp->flags);
7123         if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
7124             !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
7125                 rc = -EINVAL;
7126                 goto err_recovery_out;
7127         }
7128         fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
7129         fw_health->master_func_wait_dsecs =
7130                 le32_to_cpu(resp->master_func_wait_period);
7131         fw_health->normal_func_wait_dsecs =
7132                 le32_to_cpu(resp->normal_func_wait_period);
7133         fw_health->post_reset_wait_dsecs =
7134                 le32_to_cpu(resp->master_func_wait_period_after_reset);
7135         fw_health->post_reset_max_wait_dsecs =
7136                 le32_to_cpu(resp->max_bailout_time_after_reset);
7137         fw_health->regs[BNXT_FW_HEALTH_REG] =
7138                 le32_to_cpu(resp->fw_health_status_reg);
7139         fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
7140                 le32_to_cpu(resp->fw_heartbeat_reg);
7141         fw_health->regs[BNXT_FW_RESET_CNT_REG] =
7142                 le32_to_cpu(resp->fw_reset_cnt_reg);
7143         fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
7144                 le32_to_cpu(resp->reset_inprogress_reg);
7145         fw_health->fw_reset_inprog_reg_mask =
7146                 le32_to_cpu(resp->reset_inprogress_reg_mask);
7147         fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
7148         if (fw_health->fw_reset_seq_cnt >= 16) {
7149                 rc = -EINVAL;
7150                 goto err_recovery_out;
7151         }
7152         for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
7153                 fw_health->fw_reset_seq_regs[i] =
7154                         le32_to_cpu(resp->reset_reg[i]);
7155                 fw_health->fw_reset_seq_vals[i] =
7156                         le32_to_cpu(resp->reset_reg_val[i]);
7157                 fw_health->fw_reset_seq_delay_msec[i] =
7158                         resp->delay_after_reset[i];
7159         }
7160 err_recovery_out:
7161         mutex_unlock(&bp->hwrm_cmd_lock);
7162         if (!rc)
7163                 rc = bnxt_map_fw_health_regs(bp);
7164         if (rc)
7165                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7166         return rc;
7167 }
7168
7169 static int bnxt_hwrm_func_reset(struct bnxt *bp)
7170 {
7171         struct hwrm_func_reset_input req = {0};
7172
7173         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
7174         req.enables = 0;
7175
7176         return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
7177 }
7178
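/* Discover the configurable CoS queues and their profiles.  Queues with a
 * CNP (congestion notification) profile are left to RoCE and are not
 * counted as L2 TCs unless RoCE is unsupported on a PF.
 */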
7179 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
7180 {
7181         int rc = 0;
7182         struct hwrm_queue_qportcfg_input req = {0};
7183         struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
7184         u8 i, j, *qptr;
7185         bool no_rdma;
7186
7187         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
7188
7189         mutex_lock(&bp->hwrm_cmd_lock);
7190         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7191         if (rc)
7192                 goto qportcfg_exit;
7193
7194         if (!resp->max_configurable_queues) {
7195                 rc = -EINVAL;
7196                 goto qportcfg_exit;
7197         }
7198         bp->max_tc = resp->max_configurable_queues;
7199         bp->max_lltc = resp->max_configurable_lossless_queues;
7200         if (bp->max_tc > BNXT_MAX_QUEUE)
7201                 bp->max_tc = BNXT_MAX_QUEUE;
7202
7203         no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
7204         qptr = &resp->queue_id0;
7205         for (i = 0, j = 0; i < bp->max_tc; i++) {
7206                 bp->q_info[j].queue_id = *qptr;
7207                 bp->q_ids[i] = *qptr++;
7208                 bp->q_info[j].queue_profile = *qptr++;
7209                 bp->tc_to_qidx[j] = j;
7210                 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
7211                     (no_rdma && BNXT_PF(bp)))
7212                         j++;
7213         }
7214         bp->max_q = bp->max_tc;
7215         bp->max_tc = max_t(u8, j, 1);
7216
7217         if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
7218                 bp->max_tc = 1;
7219
7220         if (bp->max_lltc > bp->max_tc)
7221                 bp->max_lltc = bp->max_tc;
7222
7223 qportcfg_exit:
7224         mutex_unlock(&bp->hwrm_cmd_lock);
7225         return rc;
7226 }
7227
7228 static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent)
7229 {
7230         struct hwrm_ver_get_input req = {0};
7231         int rc;
7232
7233         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
7234         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
7235         req.hwrm_intf_min = HWRM_VERSION_MINOR;
7236         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
7237
7238         rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT,
7239                                    silent);
7240         return rc;
7241 }
7242
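/* Query the HWRM interface and firmware versions, the default command
 * timeout and maximum request lengths, and latch capability bits such as
 * short command, KONG mailbox channel and advanced flow management
 * support.
 */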
7243 static int bnxt_hwrm_ver_get(struct bnxt *bp)
7244 {
7245         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
7246         u32 dev_caps_cfg;
7247         int rc;
7248
7249         bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
7250         mutex_lock(&bp->hwrm_cmd_lock);
7251         rc = __bnxt_hwrm_ver_get(bp, false);
7252         if (rc)
7253                 goto hwrm_ver_get_exit;
7254
7255         memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
7256
7257         bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
7258                              resp->hwrm_intf_min_8b << 8 |
7259                              resp->hwrm_intf_upd_8b;
7260         if (resp->hwrm_intf_maj_8b < 1) {
7261                 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
7262                             resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
7263                             resp->hwrm_intf_upd_8b);
7264                 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
7265         }
7266         snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
7267                  resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
7268                  resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
7269
7270         if (strlen(resp->active_pkg_name)) {
7271                 int fw_ver_len = strlen(bp->fw_ver_str);
7272
7273                 snprintf(bp->fw_ver_str + fw_ver_len,
7274                          FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
7275                          resp->active_pkg_name);
7276                 bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
7277         }
7278
7279         bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
7280         if (!bp->hwrm_cmd_timeout)
7281                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
7282
7283         if (resp->hwrm_intf_maj_8b >= 1) {
7284                 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
7285                 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
7286         }
7287         if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
7288                 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
7289
7290         bp->chip_num = le16_to_cpu(resp->chip_num);
7291         if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
7292             !resp->chip_metal)
7293                 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
7294
7295         dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
7296         if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
7297             (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
7298                 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
7299
7300         if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
7301                 bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
7302
7303         if (dev_caps_cfg &
7304             VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
7305                 bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
7306
7307         if (dev_caps_cfg &
7308             VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
7309                 bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
7310
7311         if (dev_caps_cfg &
7312             VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
7313                 bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
7314
7315 hwrm_ver_get_exit:
7316         mutex_unlock(&bp->hwrm_cmd_lock);
7317         return rc;
7318 }
7319
7320 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
7321 {
7322         struct hwrm_fw_set_time_input req = {0};
7323         struct tm tm;
7324         time64_t now = ktime_get_real_seconds();
7325
7326         if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
7327             bp->hwrm_spec_code < 0x10400)
7328                 return -EOPNOTSUPP;
7329
7330         time64_to_tm(now, 0, &tm);
7331         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
7332         req.year = cpu_to_le16(1900 + tm.tm_year);
7333         req.month = 1 + tm.tm_mon;
7334         req.day = tm.tm_mday;
7335         req.hour = tm.tm_hour;
7336         req.minute = tm.tm_min;
7337         req.second = tm.tm_sec;
7338         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7339 }
7340
7341 static int bnxt_hwrm_port_qstats(struct bnxt *bp)
7342 {
7343         int rc;
7344         struct bnxt_pf_info *pf = &bp->pf;
7345         struct hwrm_port_qstats_input req = {0};
7346
7347         if (!(bp->flags & BNXT_FLAG_PORT_STATS))
7348                 return 0;
7349
7350         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
7351         req.port_id = cpu_to_le16(pf->port_id);
7352         req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
7353         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
7354         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7355         return rc;
7356 }
7357
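/* Collect extended port statistics and, when the firmware returns enough
 * TX statistics to cover the per-priority counters, also read the
 * priority-to-CoS queue mapping so the counters can be reported per
 * priority.
 */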
7358 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
7359 {
7360         struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
7361         struct hwrm_queue_pri2cos_qcfg_input req2 = {0};
7362         struct hwrm_port_qstats_ext_input req = {0};
7363         struct bnxt_pf_info *pf = &bp->pf;
7364         u32 tx_stat_size;
7365         int rc;
7366
7367         if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
7368                 return 0;
7369
7370         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
7371         req.port_id = cpu_to_le16(pf->port_id);
7372         req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
7373         req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
7374         tx_stat_size = bp->hw_tx_port_stats_ext ?
7375                        sizeof(*bp->hw_tx_port_stats_ext) : 0;
7376         req.tx_stat_size = cpu_to_le16(tx_stat_size);
7377         req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
7378         mutex_lock(&bp->hwrm_cmd_lock);
7379         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7380         if (!rc) {
7381                 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
7382                 bp->fw_tx_stats_ext_size = tx_stat_size ?
7383                         le16_to_cpu(resp->tx_stat_size) / 8 : 0;
7384         } else {
7385                 bp->fw_rx_stats_ext_size = 0;
7386                 bp->fw_tx_stats_ext_size = 0;
7387         }
7388         if (bp->fw_tx_stats_ext_size <=
7389             offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
7390                 mutex_unlock(&bp->hwrm_cmd_lock);
7391                 bp->pri2cos_valid = 0;
7392                 return rc;
7393         }
7394
7395         bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
7396         req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
7397
7398         rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT);
7399         if (!rc) {
7400                 struct hwrm_queue_pri2cos_qcfg_output *resp2;
7401                 u8 *pri2cos;
7402                 int i, j;
7403
7404                 resp2 = bp->hwrm_cmd_resp_addr;
7405                 pri2cos = &resp2->pri0_cos_queue_id;
7406                 for (i = 0; i < 8; i++) {
7407                         u8 queue_id = pri2cos[i];
7408
7409                         for (j = 0; j < bp->max_q; j++) {
7410                                 if (bp->q_ids[j] == queue_id)
7411                                         bp->pri2cos[i] = j;
7412                         }
7413                 }
7414                 bp->pri2cos_valid = 1;
7415         }
7416         mutex_unlock(&bp->hwrm_cmd_lock);
7417         return rc;
7418 }
7419
7420 static int bnxt_hwrm_pcie_qstats(struct bnxt *bp)
7421 {
7422         struct hwrm_pcie_qstats_input req = {0};
7423
7424         if (!(bp->flags & BNXT_FLAG_PCIE_STATS))
7425                 return 0;
7426
7427         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1);
7428         req.pcie_stat_size = cpu_to_le16(sizeof(struct pcie_ctx_hw_stats));
7429         req.pcie_stat_host_addr = cpu_to_le64(bp->hw_pcie_stats_map);
7430         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7431 }
7432
7433 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
7434 {
7435         if (bp->vxlan_port_cnt) {
7436                 bnxt_hwrm_tunnel_dst_port_free(
7437                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
7438         }
7439         bp->vxlan_port_cnt = 0;
7440         if (bp->nge_port_cnt) {
7441                 bnxt_hwrm_tunnel_dst_port_free(
7442                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
7443         }
7444         bp->nge_port_cnt = 0;
7445 }
7446
7447 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
7448 {
7449         int rc, i;
7450         u32 tpa_flags = 0;
7451
7452         if (set_tpa)
7453                 tpa_flags = bp->flags & BNXT_FLAG_TPA;
7454         else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
7455                 return 0;
7456         for (i = 0; i < bp->nr_vnics; i++) {
7457                 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
7458                 if (rc) {
7459                         netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
7460                                    i, rc);
7461                         return rc;
7462                 }
7463         }
7464         return 0;
7465 }
7466
7467 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
7468 {
7469         int i;
7470
7471         for (i = 0; i < bp->nr_vnics; i++)
7472                 bnxt_hwrm_vnic_set_rss(bp, i, false);
7473 }
7474
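/* Undo VNIC state in the required order: filters first, then RSS and RSS
 * contexts (before freeing the VNIC on older chips, after it on P5), with
 * TPA disabled before the VNIC itself is freed.
 */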
7475 static void bnxt_clear_vnic(struct bnxt *bp)
7476 {
7477         if (!bp->vnic_info)
7478                 return;
7479
7480         bnxt_hwrm_clear_vnic_filter(bp);
7481         if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
7482                 /* clear all RSS settings before freeing the vnic ctx */
7483                 bnxt_hwrm_clear_vnic_rss(bp);
7484                 bnxt_hwrm_vnic_ctx_free(bp);
7485         }
7486         /* before freeing the vnic, undo the vnic tpa settings */
7487         if (bp->flags & BNXT_FLAG_TPA)
7488                 bnxt_set_tpa(bp, false);
7489         bnxt_hwrm_vnic_free(bp);
7490         if (bp->flags & BNXT_FLAG_CHIP_P5)
7491                 bnxt_hwrm_vnic_ctx_free(bp);
7492 }
7493
7494 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
7495                                     bool irq_re_init)
7496 {
7497         bnxt_clear_vnic(bp);
7498         bnxt_hwrm_ring_free(bp, close_path);
7499         bnxt_hwrm_ring_grp_free(bp);
7500         if (irq_re_init) {
7501                 bnxt_hwrm_stat_ctx_free(bp);
7502                 bnxt_hwrm_free_tunnel_ports(bp);
7503         }
7504 }
7505
7506 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
7507 {
7508         struct hwrm_func_cfg_input req = {0};
7509         int rc;
7510
7511         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7512         req.fid = cpu_to_le16(0xffff);
7513         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
7514         if (br_mode == BRIDGE_MODE_VEB)
7515                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
7516         else if (br_mode == BRIDGE_MODE_VEPA)
7517                 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
7518         else
7519                 return -EINVAL;
7520         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7521         return rc;
7522 }
7523
7524 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
7525 {
7526         struct hwrm_func_cfg_input req = {0};
7527         int rc;
7528
7529         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
7530                 return 0;
7531
7532         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
7533         req.fid = cpu_to_le16(0xffff);
7534         req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
7535         req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
7536         if (size == 128)
7537                 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
7538
7539         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7540         return rc;
7541 }
7542
7543 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7544 {
7545         struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
7546         int rc;
7547
7548         if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
7549                 goto skip_rss_ctx;
7550
7551         /* allocate context for vnic */
7552         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
7553         if (rc) {
7554                 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7555                            vnic_id, rc);
7556                 goto vnic_setup_err;
7557         }
7558         bp->rsscos_nr_ctxs++;
7559
7560         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7561                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
7562                 if (rc) {
7563                         netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
7564                                    vnic_id, rc);
7565                         goto vnic_setup_err;
7566                 }
7567                 bp->rsscos_nr_ctxs++;
7568         }
7569
7570 skip_rss_ctx:
7571         /* configure default vnic, ring grp */
7572         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7573         if (rc) {
7574                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7575                            vnic_id, rc);
7576                 goto vnic_setup_err;
7577         }
7578
7579         /* Enable RSS hashing on vnic */
7580         rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
7581         if (rc) {
7582                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
7583                            vnic_id, rc);
7584                 goto vnic_setup_err;
7585         }
7586
7587         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7588                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7589                 if (rc) {
7590                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7591                                    vnic_id, rc);
7592                 }
7593         }
7594
7595 vnic_setup_err:
7596         return rc;
7597 }
7598
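/* P5 VNIC setup: allocate one RSS context per 64 RX rings, program the
 * RSS table, then configure the VNIC and header-data split.
 */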
7599 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
7600 {
7601         int rc, i, nr_ctxs;
7602
7603         nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
7604         for (i = 0; i < nr_ctxs; i++) {
7605                 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
7606                 if (rc) {
7607                         netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
7608                                    vnic_id, i, rc);
7609                         break;
7610                 }
7611                 bp->rsscos_nr_ctxs++;
7612         }
7613         if (i < nr_ctxs)
7614                 return -ENOMEM;
7615
7616         rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
7617         if (rc) {
7618                 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
7619                            vnic_id, rc);
7620                 return rc;
7621         }
7622         rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
7623         if (rc) {
7624                 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
7625                            vnic_id, rc);
7626                 return rc;
7627         }
7628         if (bp->flags & BNXT_FLAG_AGG_RINGS) {
7629                 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
7630                 if (rc) {
7631                         netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
7632                                    vnic_id, rc);
7633                 }
7634         }
7635         return rc;
7636 }
7637
7638 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
7639 {
7640         if (bp->flags & BNXT_FLAG_CHIP_P5)
7641                 return __bnxt_setup_vnic_p5(bp, vnic_id);
7642         else
7643                 return __bnxt_setup_vnic(bp, vnic_id);
7644 }
7645
7646 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
7647 {
7648 #ifdef CONFIG_RFS_ACCEL
7649         int i, rc = 0;
7650
7651         if (bp->flags & BNXT_FLAG_CHIP_P5)
7652                 return 0;
7653
7654         for (i = 0; i < bp->rx_nr_rings; i++) {
7655                 struct bnxt_vnic_info *vnic;
7656                 u16 vnic_id = i + 1;
7657                 u16 ring_id = i;
7658
7659                 if (vnic_id >= bp->nr_vnics)
7660                         break;
7661
7662                 vnic = &bp->vnic_info[vnic_id];
7663                 vnic->flags |= BNXT_VNIC_RFS_FLAG;
7664                 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
7665                         vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
7666                 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
7667                 if (rc) {
7668                         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
7669                                    vnic_id, rc);
7670                         break;
7671                 }
7672                 rc = bnxt_setup_vnic(bp, vnic_id);
7673                 if (rc)
7674                         break;
7675         }
7676         return rc;
7677 #else
7678         return 0;
7679 #endif
7680 }
7681
7682 /* Allow PF and VF with default VLAN to be in promiscuous mode */
7683 static bool bnxt_promisc_ok(struct bnxt *bp)
7684 {
7685 #ifdef CONFIG_BNXT_SRIOV
7686         if (BNXT_VF(bp) && !bp->vf.vlan)
7687                 return false;
7688 #endif
7689         return true;
7690 }
7691
7692 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
7693 {
7694         int rc = 0;
7695
7696         rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
7697         if (rc) {
7698                 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
7699                            rc);
7700                 return rc;
7701         }
7702
7703         rc = bnxt_hwrm_vnic_cfg(bp, 1);
7704         if (rc) {
7705                 netdev_err(bp->dev, "Cannot configure special vnic for NS2 A0: %x\n",
7706                            rc);
7707                 return rc;
7708         }
7709         return rc;
7710 }
7711
7712 static int bnxt_cfg_rx_mode(struct bnxt *);
7713 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
7714
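/* Bring up the control path: allocate stat contexts, rings and ring
 * groups, set up the default VNIC (and RFS VNICs), enable TPA, program the
 * unicast filter, RX mask and coalescing.  On error, free everything
 * allocated so far.
 */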
7715 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
7716 {
7717         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
7718         int rc = 0;
7719         unsigned int rx_nr_rings = bp->rx_nr_rings;
7720
7721         if (irq_re_init) {
7722                 rc = bnxt_hwrm_stat_ctx_alloc(bp);
7723                 if (rc) {
7724                         netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
7725                                    rc);
7726                         goto err_out;
7727                 }
7728         }
7729
7730         rc = bnxt_hwrm_ring_alloc(bp);
7731         if (rc) {
7732                 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
7733                 goto err_out;
7734         }
7735
7736         rc = bnxt_hwrm_ring_grp_alloc(bp);
7737         if (rc) {
7738                 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
7739                 goto err_out;
7740         }
7741
7742         if (BNXT_CHIP_TYPE_NITRO_A0(bp))
7743                 rx_nr_rings--;
7744
7745         /* default vnic 0 */
7746         rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
7747         if (rc) {
7748                 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
7749                 goto err_out;
7750         }
7751
7752         rc = bnxt_setup_vnic(bp, 0);
7753         if (rc)
7754                 goto err_out;
7755
7756         if (bp->flags & BNXT_FLAG_RFS) {
7757                 rc = bnxt_alloc_rfs_vnics(bp);
7758                 if (rc)
7759                         goto err_out;
7760         }
7761
7762         if (bp->flags & BNXT_FLAG_TPA) {
7763                 rc = bnxt_set_tpa(bp, true);
7764                 if (rc)
7765                         goto err_out;
7766         }
7767
7768         if (BNXT_VF(bp))
7769                 bnxt_update_vf_mac(bp);
7770
7771         /* Filter for default vnic 0 */
7772         rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
7773         if (rc) {
7774                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
7775                 goto err_out;
7776         }
7777         vnic->uc_filter_count = 1;
7778
7779         vnic->rx_mask = 0;
7780         if (bp->dev->flags & IFF_BROADCAST)
7781                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
7782
7783         if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7784                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7785
7786         if (bp->dev->flags & IFF_ALLMULTI) {
7787                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7788                 vnic->mc_list_count = 0;
7789         } else {
7790                 u32 mask = 0;
7791
7792                 bnxt_mc_list_updated(bp, &mask);
7793                 vnic->rx_mask |= mask;
7794         }
7795
7796         rc = bnxt_cfg_rx_mode(bp);
7797         if (rc)
7798                 goto err_out;
7799
7800         rc = bnxt_hwrm_set_coal(bp);
7801         if (rc)
7802                 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
7803                                 rc);
7804
7805         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7806                 rc = bnxt_setup_nitroa0_vnic(bp);
7807                 if (rc)
7808                         netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
7809                                    rc);
7810         }
7811
7812         if (BNXT_VF(bp)) {
7813                 bnxt_hwrm_func_qcfg(bp);
7814                 netdev_update_features(bp->dev);
7815         }
7816
7817         return 0;
7818
7819 err_out:
7820         bnxt_hwrm_resource_free(bp, 0, true);
7821
7822         return rc;
7823 }
7824
7825 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
7826 {
7827         bnxt_hwrm_resource_free(bp, 1, irq_re_init);
7828         return 0;
7829 }
7830
7831 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
7832 {
7833         bnxt_init_cp_rings(bp);
7834         bnxt_init_rx_rings(bp);
7835         bnxt_init_tx_rings(bp);
7836         bnxt_init_ring_grps(bp, irq_re_init);
7837         bnxt_init_vnics(bp);
7838
7839         return bnxt_init_chip(bp, irq_re_init);
7840 }
7841
7842 static int bnxt_set_real_num_queues(struct bnxt *bp)
7843 {
7844         int rc;
7845         struct net_device *dev = bp->dev;
7846
7847         rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
7848                                           bp->tx_nr_rings_xdp);
7849         if (rc)
7850                 return rc;
7851
7852         rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
7853         if (rc)
7854                 return rc;
7855
7856 #ifdef CONFIG_RFS_ACCEL
7857         if (bp->flags & BNXT_FLAG_RFS)
7858                 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
7859 #endif
7860
7861         return rc;
7862 }
7863
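/* Fit the requested RX and TX ring counts into @max completion rings.
 * With shared completion rings each count is capped at @max; otherwise
 * rings are trimmed alternately until rx + tx fits.
 */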
7864 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
7865                            bool shared)
7866 {
7867         int _rx = *rx, _tx = *tx;
7868
7869         if (shared) {
7870                 *rx = min_t(int, _rx, max);
7871                 *tx = min_t(int, _tx, max);
7872         } else {
7873                 if (max < 2)
7874                         return -ENOMEM;
7875
7876                 while (_rx + _tx > max) {
7877                         if (_rx > _tx && _rx > 1)
7878                                 _rx--;
7879                         else if (_tx > 1)
7880                                 _tx--;
7881                 }
7882                 *rx = _rx;
7883                 *tx = _tx;
7884         }
7885         return 0;
7886 }
7887
7888 static void bnxt_setup_msix(struct bnxt *bp)
7889 {
7890         const int len = sizeof(bp->irq_tbl[0].name);
7891         struct net_device *dev = bp->dev;
7892         int tcs, i;
7893
7894         tcs = netdev_get_num_tc(dev);
7895         if (tcs > 1) {
7896                 int i, off, count;
7897
7898                 for (i = 0; i < tcs; i++) {
7899                         count = bp->tx_nr_rings_per_tc;
7900                         off = i * count;
7901                         netdev_set_tc_queue(dev, i, count, off);
7902                 }
7903         }
7904
7905         for (i = 0; i < bp->cp_nr_rings; i++) {
7906                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7907                 char *attr;
7908
7909                 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
7910                         attr = "TxRx";
7911                 else if (i < bp->rx_nr_rings)
7912                         attr = "rx";
7913                 else
7914                         attr = "tx";
7915
7916                 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
7917                          attr, i);
7918                 bp->irq_tbl[map_idx].handler = bnxt_msix;
7919         }
7920 }
7921
7922 static void bnxt_setup_inta(struct bnxt *bp)
7923 {
7924         const int len = sizeof(bp->irq_tbl[0].name);
7925
7926         if (netdev_get_num_tc(bp->dev))
7927                 netdev_reset_tc(bp->dev);
7928
7929         snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
7930                  0);
7931         bp->irq_tbl[0].handler = bnxt_inta;
7932 }
7933
7934 static int bnxt_setup_int_mode(struct bnxt *bp)
7935 {
7936         int rc;
7937
7938         if (bp->flags & BNXT_FLAG_USING_MSIX)
7939                 bnxt_setup_msix(bp);
7940         else
7941                 bnxt_setup_inta(bp);
7942
7943         rc = bnxt_set_real_num_queues(bp);
7944         return rc;
7945 }
7946
7947 #ifdef CONFIG_RFS_ACCEL
7948 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
7949 {
7950         return bp->hw_resc.max_rsscos_ctxs;
7951 }
7952
7953 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
7954 {
7955         return bp->hw_resc.max_vnics;
7956 }
7957 #endif
7958
7959 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
7960 {
7961         return bp->hw_resc.max_stat_ctxs;
7962 }
7963
7964 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
7965 {
7966         return bp->hw_resc.max_cp_rings;
7967 }
7968
7969 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
7970 {
7971         unsigned int cp = bp->hw_resc.max_cp_rings;
7972
7973         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7974                 cp -= bnxt_get_ulp_msix_num(bp);
7975
7976         return cp;
7977 }
7978
7979 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
7980 {
7981         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7982
7983         if (bp->flags & BNXT_FLAG_CHIP_P5)
7984                 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
7985
7986         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
7987 }
7988
7989 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
7990 {
7991         bp->hw_resc.max_irqs = max_irqs;
7992 }
7993
7994 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
7995 {
7996         unsigned int cp;
7997
7998         cp = bnxt_get_max_func_cp_rings_for_en(bp);
7999         if (bp->flags & BNXT_FLAG_CHIP_P5)
8000                 return cp - bp->rx_nr_rings - bp->tx_nr_rings;
8001         else
8002                 return cp - bp->cp_nr_rings;
8003 }
8004
8005 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
8006 {
8007         return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
8008 }
8009
8010 int bnxt_get_avail_msix(struct bnxt *bp, int num)
8011 {
8012         int max_cp = bnxt_get_max_func_cp_rings(bp);
8013         int max_irq = bnxt_get_max_func_irqs(bp);
8014         int total_req = bp->cp_nr_rings + num;
8015         int max_idx, avail_msix;
8016
8017         max_idx = bp->total_irqs;
8018         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8019                 max_idx = min_t(int, bp->total_irqs, max_cp);
8020         avail_msix = max_idx - bp->cp_nr_rings;
8021         if (!BNXT_NEW_RM(bp) || avail_msix >= num)
8022                 return avail_msix;
8023
8024         if (max_irq < total_req) {
8025                 num = max_irq - bp->cp_nr_rings;
8026                 if (num <= 0)
8027                         return 0;
8028         }
8029         return num;
8030 }
8031
8032 static int bnxt_get_num_msix(struct bnxt *bp)
8033 {
8034         if (!BNXT_NEW_RM(bp))
8035                 return bnxt_get_max_func_irqs(bp);
8036
8037         return bnxt_nq_rings_in_use(bp);
8038 }
8039
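/* Enable MSI-X with one vector per completion ring/NQ (plus any ULP
 * vectors) and trim the RX/TX ring counts to the vectors actually granted
 * by the PCI core.
 */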
8040 static int bnxt_init_msix(struct bnxt *bp)
8041 {
8042         int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
8043         struct msix_entry *msix_ent;
8044
8045         total_vecs = bnxt_get_num_msix(bp);
8046         max = bnxt_get_max_func_irqs(bp);
8047         if (total_vecs > max)
8048                 total_vecs = max;
8049
8050         if (!total_vecs)
8051                 return 0;
8052
8053         msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
8054         if (!msix_ent)
8055                 return -ENOMEM;
8056
8057         for (i = 0; i < total_vecs; i++) {
8058                 msix_ent[i].entry = i;
8059                 msix_ent[i].vector = 0;
8060         }
8061
8062         if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
8063                 min = 2;
8064
8065         total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
8066         ulp_msix = bnxt_get_ulp_msix_num(bp);
8067         if (total_vecs < 0 || total_vecs < ulp_msix) {
8068                 rc = -ENODEV;
8069                 goto msix_setup_exit;
8070         }
8071
8072         bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
8073         if (bp->irq_tbl) {
8074                 for (i = 0; i < total_vecs; i++)
8075                         bp->irq_tbl[i].vector = msix_ent[i].vector;
8076
8077                 bp->total_irqs = total_vecs;
8078                 /* Trim rings based on the number of vectors allocated */
8079                 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
8080                                      total_vecs - ulp_msix, min == 1);
8081                 if (rc)
8082                         goto msix_setup_exit;
8083
8084                 bp->cp_nr_rings = (min == 1) ?
8085                                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
8086                                   bp->tx_nr_rings + bp->rx_nr_rings;
8087
8088         } else {
8089                 rc = -ENOMEM;
8090                 goto msix_setup_exit;
8091         }
8092         bp->flags |= BNXT_FLAG_USING_MSIX;
8093         kfree(msix_ent);
8094         return 0;
8095
8096 msix_setup_exit:
8097         netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
8098         kfree(bp->irq_tbl);
8099         bp->irq_tbl = NULL;
8100         pci_disable_msix(bp->pdev);
8101         kfree(msix_ent);
8102         return rc;
8103 }
8104
8105 static int bnxt_init_inta(struct bnxt *bp)
8106 {
8107         bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
8108         if (!bp->irq_tbl)
8109                 return -ENOMEM;
8110
8111         bp->total_irqs = 1;
8112         bp->rx_nr_rings = 1;
8113         bp->tx_nr_rings = 1;
8114         bp->cp_nr_rings = 1;
8115         bp->flags |= BNXT_FLAG_SHARED_RINGS;
8116         bp->irq_tbl[0].vector = bp->pdev->irq;
8117         return 0;
8118 }
8119
8120 static int bnxt_init_int_mode(struct bnxt *bp)
8121 {
8122         int rc = 0;
8123
8124         if (bp->flags & BNXT_FLAG_MSIX_CAP)
8125                 rc = bnxt_init_msix(bp);
8126
8127         if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
8128                 /* fallback to INTA */
8129                 rc = bnxt_init_inta(bp);
8130         }
8131         return rc;
8132 }
8133
8134 static void bnxt_clear_int_mode(struct bnxt *bp)
8135 {
8136         if (bp->flags & BNXT_FLAG_USING_MSIX)
8137                 pci_disable_msix(bp->pdev);
8138
8139         kfree(bp->irq_tbl);
8140         bp->irq_tbl = NULL;
8141         bp->flags &= ~BNXT_FLAG_USING_MSIX;
8142 }
8143
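/* Reserve rings with the firmware resource manager when the current
 * configuration needs it.  If the MSI-X vector count must change, ULP IRQs
 * are quiesced and the interrupt mode is re-initialized around the
 * reservation.
 */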
8144 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
8145 {
8146         int tcs = netdev_get_num_tc(bp->dev);
8147         bool irq_cleared = false;
8148         int rc;
8149
8150         if (!bnxt_need_reserve_rings(bp))
8151                 return 0;
8152
8153         if (irq_re_init && BNXT_NEW_RM(bp) &&
8154             bnxt_get_num_msix(bp) != bp->total_irqs) {
8155                 bnxt_ulp_irq_stop(bp);
8156                 bnxt_clear_int_mode(bp);
8157                 irq_cleared = true;
8158         }
8159         rc = __bnxt_reserve_rings(bp);
8160         if (irq_cleared) {
8161                 if (!rc)
8162                         rc = bnxt_init_int_mode(bp);
8163                 bnxt_ulp_irq_restart(bp, rc);
8164         }
8165         if (rc) {
8166                 netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
8167                 return rc;
8168         }
8169         if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
8170                 netdev_err(bp->dev, "tx ring reservation failure\n");
8171                 netdev_reset_tc(bp->dev);
8172                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8173                 return -ENOMEM;
8174         }
8175         return 0;
8176 }
8177
8178 static void bnxt_free_irq(struct bnxt *bp)
8179 {
8180         struct bnxt_irq *irq;
8181         int i;
8182
8183 #ifdef CONFIG_RFS_ACCEL
8184         free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
8185         bp->dev->rx_cpu_rmap = NULL;
8186 #endif
8187         if (!bp->irq_tbl || !bp->bnapi)
8188                 return;
8189
8190         for (i = 0; i < bp->cp_nr_rings; i++) {
8191                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8192
8193                 irq = &bp->irq_tbl[map_idx];
8194                 if (irq->requested) {
8195                         if (irq->have_cpumask) {
8196                                 irq_set_affinity_hint(irq->vector, NULL);
8197                                 free_cpumask_var(irq->cpu_mask);
8198                                 irq->have_cpumask = 0;
8199                         }
8200                         free_irq(irq->vector, bp->bnapi[i]);
8201                 }
8202
8203                 irq->requested = 0;
8204         }
8205 }
8206
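/* Request one IRQ per completion ring, add RX rings to the aRFS CPU rmap
 * when available, and spread affinity hints across the local NUMA node's
 * CPUs.
 */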
8207 static int bnxt_request_irq(struct bnxt *bp)
8208 {
8209         int i, j, rc = 0;
8210         unsigned long flags = 0;
8211 #ifdef CONFIG_RFS_ACCEL
8212         struct cpu_rmap *rmap;
8213 #endif
8214
8215         rc = bnxt_setup_int_mode(bp);
8216         if (rc) {
8217                 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
8218                            rc);
8219                 return rc;
8220         }
8221 #ifdef CONFIG_RFS_ACCEL
8222         rmap = bp->dev->rx_cpu_rmap;
8223 #endif
8224         if (!(bp->flags & BNXT_FLAG_USING_MSIX))
8225                 flags = IRQF_SHARED;
8226
8227         for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
8228                 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8229                 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
8230
8231 #ifdef CONFIG_RFS_ACCEL
8232                 if (rmap && bp->bnapi[i]->rx_ring) {
8233                         rc = irq_cpu_rmap_add(rmap, irq->vector);
8234                         if (rc)
8235                                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
8236                                             j);
8237                         j++;
8238                 }
8239 #endif
8240                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
8241                                  bp->bnapi[i]);
8242                 if (rc)
8243                         break;
8244
8245                 irq->requested = 1;
8246
8247                 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
8248                         int numa_node = dev_to_node(&bp->pdev->dev);
8249
8250                         irq->have_cpumask = 1;
8251                         cpumask_set_cpu(cpumask_local_spread(i, numa_node),
8252                                         irq->cpu_mask);
8253                         rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
8254                         if (rc) {
8255                                 netdev_warn(bp->dev,
8256                                             "Set affinity failed, IRQ = %d\n",
8257                                             irq->vector);
8258                                 break;
8259                         }
8260                 }
8261         }
8262         return rc;
8263 }
8264
8265 static void bnxt_del_napi(struct bnxt *bp)
8266 {
8267         int i;
8268
8269         if (!bp->bnapi)
8270                 return;
8271
8272         for (i = 0; i < bp->cp_nr_rings; i++) {
8273                 struct bnxt_napi *bnapi = bp->bnapi[i];
8274
8275                 napi_hash_del(&bnapi->napi);
8276                 netif_napi_del(&bnapi->napi);
8277         }
8278         /* Since we called napi_hash_del() before netif_napi_del(), we need
8279          * to respect an RCU grace period before freeing the napi structures.
8280          */
8281         synchronize_net();
8282 }
8283
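/* Register a NAPI instance per completion ring.  P5 chips use bnxt_poll_p5,
 * Nitro A0 reserves the last ring for bnxt_poll_nitroa0, and the non-MSI-X
 * case registers a single bnxt_poll instance.
 */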
8284 static void bnxt_init_napi(struct bnxt *bp)
8285 {
8286         int i;
8287         unsigned int cp_nr_rings = bp->cp_nr_rings;
8288         struct bnxt_napi *bnapi;
8289
8290         if (bp->flags & BNXT_FLAG_USING_MSIX) {
8291                 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
8292
8293                 if (bp->flags & BNXT_FLAG_CHIP_P5)
8294                         poll_fn = bnxt_poll_p5;
8295                 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8296                         cp_nr_rings--;
8297                 for (i = 0; i < cp_nr_rings; i++) {
8298                         bnapi = bp->bnapi[i];
8299                         netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
8300                 }
8301                 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8302                         bnapi = bp->bnapi[cp_nr_rings];
8303                         netif_napi_add(bp->dev, &bnapi->napi,
8304                                        bnxt_poll_nitroa0, 64);
8305                 }
8306         } else {
8307                 bnapi = bp->bnapi[0];
8308                 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
8309         }
8310 }
8311
8312 static void bnxt_disable_napi(struct bnxt *bp)
8313 {
8314         int i;
8315
8316         if (!bp->bnapi)
8317                 return;
8318
8319         for (i = 0; i < bp->cp_nr_rings; i++) {
8320                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8321
8322                 if (bp->bnapi[i]->rx_ring)
8323                         cancel_work_sync(&cpr->dim.work);
8324
8325                 napi_disable(&bp->bnapi[i]->napi);
8326         }
8327 }
8328
8329 static void bnxt_enable_napi(struct bnxt *bp)
8330 {
8331         int i;
8332
8333         for (i = 0; i < bp->cp_nr_rings; i++) {
8334                 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
8335                 bp->bnapi[i]->in_reset = false;
8336
8337                 if (bp->bnapi[i]->rx_ring) {
8338                         INIT_WORK(&cpr->dim.work, bnxt_dim_work);
8339                         cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
8340                 }
8341                 napi_enable(&bp->bnapi[i]->napi);
8342         }
8343 }
8344
8345 void bnxt_tx_disable(struct bnxt *bp)
8346 {
8347         int i;
8348         struct bnxt_tx_ring_info *txr;
8349
8350         if (bp->tx_ring) {
8351                 for (i = 0; i < bp->tx_nr_rings; i++) {
8352                         txr = &bp->tx_ring[i];
8353                         txr->dev_state = BNXT_DEV_STATE_CLOSING;
8354                 }
8355         }
8356         /* Stop all TX queues */
8357         netif_tx_disable(bp->dev);
8358         netif_carrier_off(bp->dev);
8359 }
8360
8361 void bnxt_tx_enable(struct bnxt *bp)
8362 {
8363         int i;
8364         struct bnxt_tx_ring_info *txr;
8365
8366         for (i = 0; i < bp->tx_nr_rings; i++) {
8367                 txr = &bp->tx_ring[i];
8368                 txr->dev_state = 0;
8369         }
8370         netif_tx_wake_all_queues(bp->dev);
8371         if (bp->link_info.link_up)
8372                 netif_carrier_on(bp->dev);
8373 }
8374
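/* Log the current link state: speed, duplex, flow control and, where
 * applicable, EEE and FEC status.
 */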
8375 static void bnxt_report_link(struct bnxt *bp)
8376 {
8377         if (bp->link_info.link_up) {
8378                 const char *duplex;
8379                 const char *flow_ctrl;
8380                 u32 speed;
8381                 u16 fec;
8382
8383                 netif_carrier_on(bp->dev);
8384                 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
8385                         duplex = "full";
8386                 else
8387                         duplex = "half";
8388                 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
8389                         flow_ctrl = "ON - receive & transmit";
8390                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
8391                         flow_ctrl = "ON - transmit";
8392                 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
8393                         flow_ctrl = "ON - receive";
8394                 else
8395                         flow_ctrl = "none";
8396                 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
8397                 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
8398                             speed, duplex, flow_ctrl);
8399                 if (bp->flags & BNXT_FLAG_EEE_CAP)
8400                         netdev_info(bp->dev, "EEE is %s\n",
8401                                     bp->eee.eee_active ? "active" :
8402                                                          "not active");
8403                 fec = bp->link_info.fec_cfg;
8404                 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
8405                         netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
8406                                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
8407                                     (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
8408                                      (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
8409         } else {
8410                 netif_carrier_off(bp->dev);
8411                 netdev_err(bp->dev, "NIC Link is Down\n");
8412         }
8413 }
8414
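/* Query PORT_PHY_QCAPS and cache EEE support, loopback self-test
 * capabilities, shared port config support, and the speeds supported for
 * autonegotiation.
 */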
8415 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
8416 {
8417         int rc = 0;
8418         struct hwrm_port_phy_qcaps_input req = {0};
8419         struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8420         struct bnxt_link_info *link_info = &bp->link_info;
8421
8422         bp->flags &= ~BNXT_FLAG_EEE_CAP;
8423         if (bp->test_info)
8424                 bp->test_info->flags &= ~(BNXT_TEST_FL_EXT_LPBK |
8425                                           BNXT_TEST_FL_AN_PHY_LPBK);
8426         if (bp->hwrm_spec_code < 0x10201)
8427                 return 0;
8428
8429         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
8430
8431         mutex_lock(&bp->hwrm_cmd_lock);
8432         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8433         if (rc)
8434                 goto hwrm_phy_qcaps_exit;
8435
8436         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
8437                 struct ethtool_eee *eee = &bp->eee;
8438                 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
8439
8440                 bp->flags |= BNXT_FLAG_EEE_CAP;
8441                 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8442                 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
8443                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
8444                 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
8445                                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
8446         }
8447         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
8448                 if (bp->test_info)
8449                         bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
8450         }
8451         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED) {
8452                 if (bp->test_info)
8453                         bp->test_info->flags |= BNXT_TEST_FL_AN_PHY_LPBK;
8454         }
8455         if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED) {
8456                 if (BNXT_PF(bp))
8457                         bp->fw_cap |= BNXT_FW_CAP_SHARED_PORT_CFG;
8458         }
8459         if (resp->supported_speeds_auto_mode)
8460                 link_info->support_auto_speeds =
8461                         le16_to_cpu(resp->supported_speeds_auto_mode);
8462
8463         bp->port_count = resp->port_cnt;
8464
8465 hwrm_phy_qcaps_exit:
8466         mutex_unlock(&bp->hwrm_cmd_lock);
8467         return rc;
8468 }
8469
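/* Query PORT_PHY_QCFG and refresh the cached link, pause, speed and EEE
 * state.  When chng_link_state is set, link_up is updated and any change is
 * reported; advertised speeds are also trimmed to what the PHY still
 * supports.
 */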
8470 static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
8471 {
8472         int rc = 0;
8473         struct bnxt_link_info *link_info = &bp->link_info;
8474         struct hwrm_port_phy_qcfg_input req = {0};
8475         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8476         u8 link_up = link_info->link_up;
8477         u16 diff;
8478
8479         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
8480
8481         mutex_lock(&bp->hwrm_cmd_lock);
8482         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8483         if (rc) {
8484                 mutex_unlock(&bp->hwrm_cmd_lock);
8485                 return rc;
8486         }
8487
8488         memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
8489         link_info->phy_link_status = resp->link;
8490         link_info->duplex = resp->duplex_cfg;
8491         if (bp->hwrm_spec_code >= 0x10800)
8492                 link_info->duplex = resp->duplex_state;
8493         link_info->pause = resp->pause;
8494         link_info->auto_mode = resp->auto_mode;
8495         link_info->auto_pause_setting = resp->auto_pause;
8496         link_info->lp_pause = resp->link_partner_adv_pause;
8497         link_info->force_pause_setting = resp->force_pause;
8498         link_info->duplex_setting = resp->duplex_cfg;
8499         if (link_info->phy_link_status == BNXT_LINK_LINK)
8500                 link_info->link_speed = le16_to_cpu(resp->link_speed);
8501         else
8502                 link_info->link_speed = 0;
8503         link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
8504         link_info->support_speeds = le16_to_cpu(resp->support_speeds);
8505         link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
8506         link_info->lp_auto_link_speeds =
8507                 le16_to_cpu(resp->link_partner_adv_speeds);
8508         link_info->preemphasis = le32_to_cpu(resp->preemphasis);
8509         link_info->phy_ver[0] = resp->phy_maj;
8510         link_info->phy_ver[1] = resp->phy_min;
8511         link_info->phy_ver[2] = resp->phy_bld;
8512         link_info->media_type = resp->media_type;
8513         link_info->phy_type = resp->phy_type;
8514         link_info->transceiver = resp->xcvr_pkg_type;
8515         link_info->phy_addr = resp->eee_config_phy_addr &
8516                               PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
8517         link_info->module_status = resp->module_status;
8518
8519         if (bp->flags & BNXT_FLAG_EEE_CAP) {
8520                 struct ethtool_eee *eee = &bp->eee;
8521                 u16 fw_speeds;
8522
8523                 eee->eee_active = 0;
8524                 if (resp->eee_config_phy_addr &
8525                     PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
8526                         eee->eee_active = 1;
8527                         fw_speeds = le16_to_cpu(
8528                                 resp->link_partner_adv_eee_link_speed_mask);
8529                         eee->lp_advertised =
8530                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8531                 }
8532
8533                 /* Pull initial EEE config */
8534                 if (!chng_link_state) {
8535                         if (resp->eee_config_phy_addr &
8536                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
8537                                 eee->eee_enabled = 1;
8538
8539                         fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
8540                         eee->advertised =
8541                                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
8542
8543                         if (resp->eee_config_phy_addr &
8544                             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
8545                                 __le32 tmr;
8546
8547                                 eee->tx_lpi_enabled = 1;
8548                                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
8549                                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
8550                                         PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
8551                         }
8552                 }
8553         }
8554
8555         link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
8556         if (bp->hwrm_spec_code >= 0x10504)
8557                 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
8558
8559         /* TODO: need to add more logic to report VF link */
8560         if (chng_link_state) {
8561                 if (link_info->phy_link_status == BNXT_LINK_LINK)
8562                         link_info->link_up = 1;
8563                 else
8564                         link_info->link_up = 0;
8565                 if (link_up != link_info->link_up)
8566                         bnxt_report_link(bp);
8567         } else {
8568                 /* always link down if not required to update link state */
8569                 link_info->link_up = 0;
8570         }
8571         mutex_unlock(&bp->hwrm_cmd_lock);
8572
8573         if (!BNXT_PHY_CFG_ABLE(bp))
8574                 return 0;
8575
8576         diff = link_info->support_auto_speeds ^ link_info->advertising;
8577         if ((link_info->support_auto_speeds | diff) !=
8578             link_info->support_auto_speeds) {
8579                 /* An advertised speed is no longer supported, so we need to
8580                  * update the advertisement settings.  Caller holds RTNL
8581                  * so we can modify link settings.
8582                  */
8583                 link_info->advertising = link_info->support_auto_speeds;
8584                 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
8585                         bnxt_hwrm_set_link_setting(bp, true, false);
8586         }
8587         return 0;
8588 }
8589
8590 static void bnxt_get_port_module_status(struct bnxt *bp)
8591 {
8592         struct bnxt_link_info *link_info = &bp->link_info;
8593         struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
8594         u8 module_status;
8595
8596         if (bnxt_update_link(bp, true))
8597                 return;
8598
8599         module_status = link_info->module_status;
8600         switch (module_status) {
8601         case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
8602         case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
8603         case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
8604                 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
8605                             bp->pf.port_id);
8606                 if (bp->hwrm_spec_code >= 0x10201) {
8607                         netdev_warn(bp->dev, "Module part number %s\n",
8608                                     resp->phy_vendor_partnumber);
8609                 }
8610                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
8611                         netdev_warn(bp->dev, "TX is disabled\n");
8612                 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
8613                         netdev_warn(bp->dev, "SFP+ module is shutdown\n");
8614         }
8615 }
8616
8617 static void
8618 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
8619 {
8620         if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
8621                 if (bp->hwrm_spec_code >= 0x10201)
8622                         req->auto_pause =
8623                                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
8624                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8625                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
8626                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8627                         req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
8628                 req->enables |=
8629                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8630         } else {
8631                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
8632                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
8633                 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
8634                         req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
8635                 req->enables |=
8636                         cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
8637                 if (bp->hwrm_spec_code >= 0x10201) {
8638                         req->auto_pause = req->force_pause;
8639                         req->enables |= cpu_to_le32(
8640                                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
8641                 }
8642         }
8643 }
8644
8645 static void bnxt_hwrm_set_link_common(struct bnxt *bp,
8646                                       struct hwrm_port_phy_cfg_input *req)
8647 {
8648         u8 autoneg = bp->link_info.autoneg;
8649         u16 fw_link_speed = bp->link_info.req_link_speed;
8650         u16 advertising = bp->link_info.advertising;
8651
8652         if (autoneg & BNXT_AUTONEG_SPEED) {
8653                 req->auto_mode |=
8654                         PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
8655
8656                 req->enables |= cpu_to_le32(
8657                         PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
8658                 req->auto_link_speed_mask = cpu_to_le16(advertising);
8659
8660                 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
8661                 req->flags |=
8662                         cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
8663         } else {
8664                 req->force_link_speed = cpu_to_le16(fw_link_speed);
8665                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
8666         }
8667
8668         /* tell chimp that the setting takes effect immediately */
8669         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
8670 }
8671
8672 int bnxt_hwrm_set_pause(struct bnxt *bp)
8673 {
8674         struct hwrm_port_phy_cfg_input req = {0};
8675         int rc;
8676
8677         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8678         bnxt_hwrm_set_pause_common(bp, &req);
8679
8680         if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
8681             bp->link_info.force_link_chng)
8682                 bnxt_hwrm_set_link_common(bp, &req);
8683
8684         mutex_lock(&bp->hwrm_cmd_lock);
8685         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8686         if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
8687                 /* since changing of pause setting doesn't trigger any link
8688                 /* since a pause setting change doesn't trigger any link
8689                  * change event, the driver needs to update the current pause
8690                  * result upon successful return of the phy_cfg command */
8691                 bp->link_info.pause =
8692                 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
8693                 bp->link_info.auto_pause_setting = 0;
8694                 if (!bp->link_info.force_link_chng)
8695                         bnxt_report_link(bp);
8696         }
8697         bp->link_info.force_link_chng = false;
8698         mutex_unlock(&bp->hwrm_cmd_lock);
8699         return rc;
8700 }
8701
8702 static void bnxt_hwrm_set_eee(struct bnxt *bp,
8703                               struct hwrm_port_phy_cfg_input *req)
8704 {
8705         struct ethtool_eee *eee = &bp->eee;
8706
8707         if (eee->eee_enabled) {
8708                 u16 eee_speeds;
8709                 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
8710
8711                 if (eee->tx_lpi_enabled)
8712                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
8713                 else
8714                         flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
8715
8716                 req->flags |= cpu_to_le32(flags);
8717                 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
8718                 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
8719                 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
8720         } else {
8721                 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
8722         }
8723 }
8724
8725 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
8726 {
8727         struct hwrm_port_phy_cfg_input req = {0};
8728
8729         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8730         if (set_pause)
8731                 bnxt_hwrm_set_pause_common(bp, &req);
8732
8733         bnxt_hwrm_set_link_common(bp, &req);
8734
8735         if (set_eee)
8736                 bnxt_hwrm_set_eee(bp, &req);
8737         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8738 }
8739
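/* Force the link down on close.  Skipped unless this is the only PF on the
 * port and no VFs are active.
 */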
8740 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
8741 {
8742         struct hwrm_port_phy_cfg_input req = {0};
8743
8744         if (!BNXT_SINGLE_PF(bp))
8745                 return 0;
8746
8747         if (pci_num_vf(bp->pdev))
8748                 return 0;
8749
8750         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
8751         req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
8752         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8753 }
8754
8755 static int bnxt_fw_init_one(struct bnxt *bp);
8756
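/* Notify firmware of the interface going up or down via FUNC_DRV_IF_CHANGE.
 * On the up transition, handle the firmware reporting that resources changed
 * or that a hot firmware reset completed while the interface was down.
 */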
8757 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
8758 {
8759         struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
8760         struct hwrm_func_drv_if_change_input req = {0};
8761         bool resc_reinit = false, fw_reset = false;
8762         u32 flags = 0;
8763         int rc;
8764
8765         if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
8766                 return 0;
8767
8768         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
8769         if (up)
8770                 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
8771         mutex_lock(&bp->hwrm_cmd_lock);
8772         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8773         if (!rc)
8774                 flags = le32_to_cpu(resp->flags);
8775         mutex_unlock(&bp->hwrm_cmd_lock);
8776         if (rc)
8777                 return rc;
8778
8779         if (!up)
8780                 return 0;
8781
8782         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
8783                 resc_reinit = true;
8784         if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE)
8785                 fw_reset = true;
8786
8787         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
8788                 netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
8789                 return -ENODEV;
8790         }
8791         if (resc_reinit || fw_reset) {
8792                 if (fw_reset) {
8793                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
8794                                 bnxt_ulp_stop(bp);
8795                         bnxt_free_ctx_mem(bp);
8796                         kfree(bp->ctx);
8797                         bp->ctx = NULL;
8798                         rc = bnxt_fw_init_one(bp);
8799                         if (rc) {
8800                                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
8801                                 return rc;
8802                         }
8803                         bnxt_clear_int_mode(bp);
8804                         rc = bnxt_init_int_mode(bp);
8805                         if (rc) {
8806                                 netdev_err(bp->dev, "init int mode failed\n");
8807                                 return rc;
8808                         }
8809                         set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
8810                 }
8811                 if (BNXT_NEW_RM(bp)) {
8812                         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8813
8814                         rc = bnxt_hwrm_func_resc_qcaps(bp, true);
8815                         hw_resc->resv_cp_rings = 0;
8816                         hw_resc->resv_stat_ctxs = 0;
8817                         hw_resc->resv_irqs = 0;
8818                         hw_resc->resv_tx_rings = 0;
8819                         hw_resc->resv_rx_rings = 0;
8820                         hw_resc->resv_hw_ring_grps = 0;
8821                         hw_resc->resv_vnics = 0;
8822                         if (!fw_reset) {
8823                                 bp->tx_nr_rings = 0;
8824                                 bp->rx_nr_rings = 0;
8825                         }
8826                 }
8827         }
8828         return 0;
8829 }
8830
8831 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
8832 {
8833         struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
8834         struct hwrm_port_led_qcaps_input req = {0};
8835         struct bnxt_pf_info *pf = &bp->pf;
8836         int rc;
8837
8838         bp->num_leds = 0;
8839         if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
8840                 return 0;
8841
8842         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
8843         req.port_id = cpu_to_le16(pf->port_id);
8844         mutex_lock(&bp->hwrm_cmd_lock);
8845         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8846         if (rc) {
8847                 mutex_unlock(&bp->hwrm_cmd_lock);
8848                 return rc;
8849         }
8850         if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
8851                 int i;
8852
8853                 bp->num_leds = resp->num_leds;
8854                 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
8855                                                  bp->num_leds);
8856                 for (i = 0; i < bp->num_leds; i++) {
8857                         struct bnxt_led_info *led = &bp->leds[i];
8858                         __le16 caps = led->led_state_caps;
8859
8860                         if (!led->led_group_id ||
8861                             !BNXT_LED_ALT_BLINK_CAP(caps)) {
8862                                 bp->num_leds = 0;
8863                                 break;
8864                         }
8865                 }
8866         }
8867         mutex_unlock(&bp->hwrm_cmd_lock);
8868         return 0;
8869 }
8870
8871 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
8872 {
8873         struct hwrm_wol_filter_alloc_input req = {0};
8874         struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
8875         int rc;
8876
8877         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
8878         req.port_id = cpu_to_le16(bp->pf.port_id);
8879         req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
8880         req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
8881         memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
8882         mutex_lock(&bp->hwrm_cmd_lock);
8883         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8884         if (!rc)
8885                 bp->wol_filter_id = resp->wol_filter_id;
8886         mutex_unlock(&bp->hwrm_cmd_lock);
8887         return rc;
8888 }
8889
8890 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
8891 {
8892         struct hwrm_wol_filter_free_input req = {0};
8893         int rc;
8894
8895         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
8896         req.port_id = cpu_to_le16(bp->pf.port_id);
8897         req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
8898         req.wol_filter_id = bp->wol_filter_id;
8899         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8900         return rc;
8901 }
8902
8903 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
8904 {
8905         struct hwrm_wol_filter_qcfg_input req = {0};
8906         struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
8907         u16 next_handle = 0;
8908         int rc;
8909
8910         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
8911         req.port_id = cpu_to_le16(bp->pf.port_id);
8912         req.handle = cpu_to_le16(handle);
8913         mutex_lock(&bp->hwrm_cmd_lock);
8914         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8915         if (!rc) {
8916                 next_handle = le16_to_cpu(resp->next_handle);
8917                 if (next_handle != 0) {
8918                         if (resp->wol_type ==
8919                             WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
8920                                 bp->wol = 1;
8921                                 bp->wol_filter_id = resp->wol_filter_id;
8922                         }
8923                 }
8924         }
8925         mutex_unlock(&bp->hwrm_cmd_lock);
8926         return next_handle;
8927 }
8928
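/* Walk the firmware's WoL filter list and latch the magic-packet filter id,
 * if one is configured, into bp->wol and bp->wol_filter_id.
 */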
8929 static void bnxt_get_wol_settings(struct bnxt *bp)
8930 {
8931         u16 handle = 0;
8932
8933         bp->wol = 0;
8934         if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
8935                 return;
8936
8937         do {
8938                 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
8939         } while (handle && handle != 0xffff);
8940 }
8941
8942 #ifdef CONFIG_BNXT_HWMON
8943 static ssize_t bnxt_show_temp(struct device *dev,
8944                               struct device_attribute *devattr, char *buf)
8945 {
8946         struct hwrm_temp_monitor_query_input req = {0};
8947         struct hwrm_temp_monitor_query_output *resp;
8948         struct bnxt *bp = dev_get_drvdata(dev);
8949         u32 temp = 0;
8950
8951         resp = bp->hwrm_cmd_resp_addr;
8952         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
8953         mutex_lock(&bp->hwrm_cmd_lock);
8954         if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
8955                 temp = resp->temp * 1000; /* display millidegree */
8956         mutex_unlock(&bp->hwrm_cmd_lock);
8957
8958         return sprintf(buf, "%u\n", temp);
8959 }
8960 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
8961
8962 static struct attribute *bnxt_attrs[] = {
8963         &sensor_dev_attr_temp1_input.dev_attr.attr,
8964         NULL
8965 };
8966 ATTRIBUTE_GROUPS(bnxt);
8967
8968 static void bnxt_hwmon_close(struct bnxt *bp)
8969 {
8970         if (bp->hwmon_dev) {
8971                 hwmon_device_unregister(bp->hwmon_dev);
8972                 bp->hwmon_dev = NULL;
8973         }
8974 }
8975
8976 static void bnxt_hwmon_open(struct bnxt *bp)
8977 {
8978         struct pci_dev *pdev = bp->pdev;
8979
8980         if (bp->hwmon_dev)
8981                 return;
8982
8983         bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
8984                                                           DRV_MODULE_NAME, bp,
8985                                                           bnxt_groups);
8986         if (IS_ERR(bp->hwmon_dev)) {
8987                 bp->hwmon_dev = NULL;
8988                 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
8989         }
8990 }
8991 #else
8992 static void bnxt_hwmon_close(struct bnxt *bp)
8993 {
8994 }
8995
8996 static void bnxt_hwmon_open(struct bnxt *bp)
8997 {
8998 }
8999 #endif
9000
9001 static bool bnxt_eee_config_ok(struct bnxt *bp)
9002 {
9003         struct ethtool_eee *eee = &bp->eee;
9004         struct bnxt_link_info *link_info = &bp->link_info;
9005
9006         if (!(bp->flags & BNXT_FLAG_EEE_CAP))
9007                 return true;
9008
9009         if (eee->eee_enabled) {
9010                 u32 advertising =
9011                         _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
9012
9013                 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9014                         eee->eee_enabled = 0;
9015                         return false;
9016                 }
9017                 if (eee->advertised & ~advertising) {
9018                         eee->advertised = advertising & eee->supported;
9019                         return false;
9020                 }
9021         }
9022         return true;
9023 }
9024
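/* Compare the requested link, pause and EEE settings against what the
 * hardware reports and re-apply them through PORT_PHY_CFG if they differ.
 */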
9025 static int bnxt_update_phy_setting(struct bnxt *bp)
9026 {
9027         int rc;
9028         bool update_link = false;
9029         bool update_pause = false;
9030         bool update_eee = false;
9031         struct bnxt_link_info *link_info = &bp->link_info;
9032
9033         rc = bnxt_update_link(bp, true);
9034         if (rc) {
9035                 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
9036                            rc);
9037                 return rc;
9038         }
9039         if (!BNXT_SINGLE_PF(bp))
9040                 return 0;
9041
9042         if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9043             (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
9044             link_info->req_flow_ctrl)
9045                 update_pause = true;
9046         if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
9047             link_info->force_pause_setting != link_info->req_flow_ctrl)
9048                 update_pause = true;
9049         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
9050                 if (BNXT_AUTO_MODE(link_info->auto_mode))
9051                         update_link = true;
9052                 if (link_info->req_link_speed != link_info->force_link_speed)
9053                         update_link = true;
9054                 if (link_info->req_duplex != link_info->duplex_setting)
9055                         update_link = true;
9056         } else {
9057                 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
9058                         update_link = true;
9059                 if (link_info->advertising != link_info->auto_link_speeds)
9060                         update_link = true;
9061         }
9062
9063         /* The last close may have shut down the link, so we need to call
9064          * PHY_CFG to bring it back up.
9065          */
9066         if (!netif_carrier_ok(bp->dev))
9067                 update_link = true;
9068
9069         if (!bnxt_eee_config_ok(bp))
9070                 update_eee = true;
9071
9072         if (update_link)
9073                 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
9074         else if (update_pause)
9075                 rc = bnxt_hwrm_set_pause(bp);
9076         if (rc) {
9077                 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
9078                            rc);
9079                 return rc;
9080         }
9081
9082         return rc;
9083 }
9084
9085 /* Common routine to pre-map certain register blocks into different GRC windows.
9086  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
9087  * in the PF and 3 windows in the VF can be customized to map different
9088  * register blocks.
9089  */
9090 static void bnxt_preset_reg_win(struct bnxt *bp)
9091 {
9092         if (BNXT_PF(bp)) {
9093                 /* CAG registers map to GRC window #4 */
9094                 writel(BNXT_CAG_REG_BASE,
9095                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
9096         }
9097 }
9098
9099 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
9100
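/* Core open path: reserve rings and memory, set up NAPI and IRQs, initialize
 * the NIC, optionally re-apply PHY settings, then enable interrupts, start
 * the timer and wake the TX queues.
 */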
9101 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9102 {
9103         int rc = 0;
9104
9105         bnxt_preset_reg_win(bp);
9106         netif_carrier_off(bp->dev);
9107         if (irq_re_init) {
9108                 /* Reserve rings now if none were reserved at driver probe. */
9109                 rc = bnxt_init_dflt_ring_mode(bp);
9110                 if (rc) {
9111                         netdev_err(bp->dev, "Failed to reserve default rings at open\n");
9112                         return rc;
9113                 }
9114         }
9115         rc = bnxt_reserve_rings(bp, irq_re_init);
9116         if (rc)
9117                 return rc;
9118         if ((bp->flags & BNXT_FLAG_RFS) &&
9119             !(bp->flags & BNXT_FLAG_USING_MSIX)) {
9120                 /* disable RFS if falling back to INTA */
9121                 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
9122                 bp->flags &= ~BNXT_FLAG_RFS;
9123         }
9124
9125         rc = bnxt_alloc_mem(bp, irq_re_init);
9126         if (rc) {
9127                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9128                 goto open_err_free_mem;
9129         }
9130
9131         if (irq_re_init) {
9132                 bnxt_init_napi(bp);
9133                 rc = bnxt_request_irq(bp);
9134                 if (rc) {
9135                         netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
9136                         goto open_err_irq;
9137                 }
9138         }
9139
9140         bnxt_enable_napi(bp);
9141         bnxt_debug_dev_init(bp);
9142
9143         rc = bnxt_init_nic(bp, irq_re_init);
9144         if (rc) {
9145                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9146                 goto open_err;
9147         }
9148
9149         if (link_re_init) {
9150                 mutex_lock(&bp->link_lock);
9151                 rc = bnxt_update_phy_setting(bp);
9152                 mutex_unlock(&bp->link_lock);
9153                 if (rc) {
9154                         netdev_warn(bp->dev, "failed to update phy settings\n");
9155                         if (BNXT_SINGLE_PF(bp)) {
9156                                 bp->link_info.phy_retry = true;
9157                                 bp->link_info.phy_retry_expires =
9158                                         jiffies + 5 * HZ;
9159                         }
9160                 }
9161         }
9162
9163         if (irq_re_init)
9164                 udp_tunnel_get_rx_info(bp->dev);
9165
9166         set_bit(BNXT_STATE_OPEN, &bp->state);
9167         bnxt_enable_int(bp);
9168         /* Enable TX queues */
9169         bnxt_tx_enable(bp);
9170         mod_timer(&bp->timer, jiffies + bp->current_interval);
9171         /* Poll link status and check for SFP+ module status */
9172         bnxt_get_port_module_status(bp);
9173
9174         /* VF-reps may need to be re-opened after the PF is re-opened */
9175         if (BNXT_PF(bp))
9176                 bnxt_vf_reps_open(bp);
9177         return 0;
9178
9179 open_err:
9180         bnxt_debug_dev_exit(bp);
9181         bnxt_disable_napi(bp);
9182
9183 open_err_irq:
9184         bnxt_del_napi(bp);
9185
9186 open_err_free_mem:
9187         bnxt_free_skbs(bp);
9188         bnxt_free_irq(bp);
9189         bnxt_free_mem(bp, true);
9190         return rc;
9191 }
9192
9193 /* rtnl_lock held */
9194 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9195 {
9196         int rc = 0;
9197
9198         rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
9199         if (rc) {
9200                 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
9201                 dev_close(bp->dev);
9202         }
9203         return rc;
9204 }
9205
9206 /* rtnl_lock held, open the NIC half way by allocating all resources, but
9207  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
9208  * self tests.
9209  */
9210 int bnxt_half_open_nic(struct bnxt *bp)
9211 {
9212         int rc = 0;
9213
9214         rc = bnxt_alloc_mem(bp, false);
9215         if (rc) {
9216                 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
9217                 goto half_open_err;
9218         }
9219         rc = bnxt_init_nic(bp, false);
9220         if (rc) {
9221                 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
9222                 goto half_open_err;
9223         }
9224         return 0;
9225
9226 half_open_err:
9227         bnxt_free_skbs(bp);
9228         bnxt_free_mem(bp, false);
9229         dev_close(bp->dev);
9230         return rc;
9231 }
9232
9233 /* rtnl_lock held, this call can only be made after a previous successful
9234  * call to bnxt_half_open_nic().
9235  */
9236 void bnxt_half_close_nic(struct bnxt *bp)
9237 {
9238         bnxt_hwrm_resource_free(bp, false, false);
9239         bnxt_free_skbs(bp);
9240         bnxt_free_mem(bp, false);
9241 }
9242
9243 static int bnxt_open(struct net_device *dev)
9244 {
9245         struct bnxt *bp = netdev_priv(dev);
9246         int rc;
9247
9248         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
9249                 netdev_err(bp->dev, "A previous firmware reset did not complete, aborting\n");
9250                 return -ENODEV;
9251         }
9252
9253         rc = bnxt_hwrm_if_change(bp, true);
9254         if (rc)
9255                 return rc;
9256         rc = __bnxt_open_nic(bp, true, true);
9257         if (rc) {
9258                 bnxt_hwrm_if_change(bp, false);
9259         } else {
9260                 if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
9261                         if (BNXT_PF(bp)) {
9262                                 struct bnxt_pf_info *pf = &bp->pf;
9263                                 int n = pf->active_vfs;
9264
9265                                 if (n)
9266                                         bnxt_cfg_hw_sriov(bp, &n, true);
9267                         }
9268                         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9269                                 bnxt_ulp_start(bp, 0);
9270                 }
9271                 bnxt_hwmon_open(bp);
9272         }
9273
9274         return rc;
9275 }
9276
9277 static bool bnxt_drv_busy(struct bnxt *bp)
9278 {
9279         return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
9280                 test_bit(BNXT_STATE_READ_STATS, &bp->state));
9281 }
9282
9283 static void bnxt_get_ring_stats(struct bnxt *bp,
9284                                 struct rtnl_link_stats64 *stats);
9285
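/* Core close path: stop TX, wait for in-flight stats/slow-path work, shut
 * down the rings, save the ring counters, and free IRQs, NAPI and memory as
 * requested.
 */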
9286 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
9287                              bool link_re_init)
9288 {
9289         /* Close the VF-reps before closing PF */
9290         if (BNXT_PF(bp))
9291                 bnxt_vf_reps_close(bp);
9292
9293         /* Change device state to avoid TX queue wake-ups */
9294         bnxt_tx_disable(bp);
9295
9296         clear_bit(BNXT_STATE_OPEN, &bp->state);
9297         smp_mb__after_atomic();
9298         while (bnxt_drv_busy(bp))
9299                 msleep(20);
9300
9301         /* Flush rings and disable interrupts */
9302         bnxt_shutdown_nic(bp, irq_re_init);
9303
9304         /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
9305
9306         bnxt_debug_dev_exit(bp);
9307         bnxt_disable_napi(bp);
9308         del_timer_sync(&bp->timer);
9309         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) &&
9310             pci_is_enabled(bp->pdev))
9311                 pci_disable_device(bp->pdev);
9312
9313         bnxt_free_skbs(bp);
9314
9315         /* Save ring stats before shutdown */
9316         if (bp->bnapi)
9317                 bnxt_get_ring_stats(bp, &bp->net_stats_prev);
9318         if (irq_re_init) {
9319                 bnxt_free_irq(bp);
9320                 bnxt_del_napi(bp);
9321         }
9322         bnxt_free_mem(bp, irq_re_init);
9323 }
9324
9325 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
9326 {
9327         int rc = 0;
9328
9329         if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
9330                 /* If we get here, it means firmware reset is in progress
9331                  * while we are trying to close.  We can safely proceed with
9332                  * the close because we are holding rtnl_lock().  Some firmware
9333                  * messages may fail as we proceed to close.  We set the
9334                  * ABORT_ERR flag here so that the FW reset thread will later
9335                  * abort when it gets the rtnl_lock() and sees the flag.
9336                  */
9337                 netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
9338                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
9339         }
9340
9341 #ifdef CONFIG_BNXT_SRIOV
9342         if (bp->sriov_cfg) {
9343                 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
9344                                                       !bp->sriov_cfg,
9345                                                       BNXT_SRIOV_CFG_WAIT_TMO);
9346                 if (rc)
9347                         netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
9348         }
9349 #endif
9350         __bnxt_close_nic(bp, irq_re_init, link_re_init);
9351         return rc;
9352 }
9353
9354 static int bnxt_close(struct net_device *dev)
9355 {
9356         struct bnxt *bp = netdev_priv(dev);
9357
9358         bnxt_hwmon_close(bp);
9359         bnxt_close_nic(bp, true, true);
9360         bnxt_hwrm_shutdown_link(bp);
9361         bnxt_hwrm_if_change(bp, false);
9362         return 0;
9363 }
9364
9365 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
9366                                    u16 *val)
9367 {
9368         struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr;
9369         struct hwrm_port_phy_mdio_read_input req = {0};
9370         int rc;
9371
9372         if (bp->hwrm_spec_code < 0x10a00)
9373                 return -EOPNOTSUPP;
9374
9375         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1);
9376         req.port_id = cpu_to_le16(bp->pf.port_id);
9377         req.phy_addr = phy_addr;
9378         req.reg_addr = cpu_to_le16(reg & 0x1f);
9379         if (mdio_phy_id_is_c45(phy_addr)) {
9380                 req.cl45_mdio = 1;
9381                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9382                 req.dev_addr = mdio_phy_id_devad(phy_addr);
9383                 req.reg_addr = cpu_to_le16(reg);
9384         }
9385
9386         mutex_lock(&bp->hwrm_cmd_lock);
9387         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9388         if (!rc)
9389                 *val = le16_to_cpu(resp->reg_data);
9390         mutex_unlock(&bp->hwrm_cmd_lock);
9391         return rc;
9392 }
9393
9394 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
9395                                     u16 val)
9396 {
9397         struct hwrm_port_phy_mdio_write_input req = {0};
9398
9399         if (bp->hwrm_spec_code < 0x10a00)
9400                 return -EOPNOTSUPP;
9401
9402         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1);
9403         req.port_id = cpu_to_le16(bp->pf.port_id);
9404         req.phy_addr = phy_addr;
9405         req.reg_addr = cpu_to_le16(reg & 0x1f);
9406         if (mdio_phy_id_is_c45(phy_addr)) {
9407                 req.cl45_mdio = 1;
9408                 req.phy_addr = mdio_phy_id_prtad(phy_addr);
9409                 req.dev_addr = mdio_phy_id_devad(phy_addr);
9410                 req.reg_addr = cpu_to_le16(reg);
9411         }
9412         req.reg_data = cpu_to_le16(val);
9413
9414         return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9415 }
9416
9417 /* rtnl_lock held */
9418 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9419 {
9420         struct mii_ioctl_data *mdio = if_mii(ifr);
9421         struct bnxt *bp = netdev_priv(dev);
9422         int rc;
9423
9424         switch (cmd) {
9425         case SIOCGMIIPHY:
9426                 mdio->phy_id = bp->link_info.phy_addr;
9427
9428                 /* fallthru */
9429         case SIOCGMIIREG: {
9430                 u16 mii_regval = 0;
9431
9432                 if (!netif_running(dev))
9433                         return -EAGAIN;
9434
9435                 rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
9436                                              &mii_regval);
9437                 mdio->val_out = mii_regval;
9438                 return rc;
9439         }
9440
9441         case SIOCSMIIREG:
9442                 if (!netif_running(dev))
9443                         return -EAGAIN;
9444
9445                 return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
9446                                                 mdio->val_in);
9447
9448         default:
9449                 /* do nothing */
9450                 break;
9451         }
9452         return -EOPNOTSUPP;
9453 }
9454
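/* Accumulate the per-completion-ring hardware counters into the standard
 * rtnl_link_stats64 fields.
 */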
9455 static void bnxt_get_ring_stats(struct bnxt *bp,
9456                                 struct rtnl_link_stats64 *stats)
9457 {
9458         int i;
9459
9461         for (i = 0; i < bp->cp_nr_rings; i++) {
9462                 struct bnxt_napi *bnapi = bp->bnapi[i];
9463                 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9464                 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
9465
9466                 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
9467                 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
9468                 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
9469
9470                 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
9471                 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
9472                 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
9473
9474                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
9475                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
9476                 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
9477
9478                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
9479                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
9480                 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
9481
9482                 stats->rx_missed_errors +=
9483                         le64_to_cpu(hw_stats->rx_discard_pkts);
9484
9485                 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
9486
9487                 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
9488         }
9489 }
9490
9491 static void bnxt_add_prev_stats(struct bnxt *bp,
9492                                 struct rtnl_link_stats64 *stats)
9493 {
9494         struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
9495
9496         stats->rx_packets += prev_stats->rx_packets;
9497         stats->tx_packets += prev_stats->tx_packets;
9498         stats->rx_bytes += prev_stats->rx_bytes;
9499         stats->tx_bytes += prev_stats->tx_bytes;
9500         stats->rx_missed_errors += prev_stats->rx_missed_errors;
9501         stats->multicast += prev_stats->multicast;
9502         stats->tx_dropped += prev_stats->tx_dropped;
9503 }
9504
9505 static void
9506 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
9507 {
9508         struct bnxt *bp = netdev_priv(dev);
9509
9510         set_bit(BNXT_STATE_READ_STATS, &bp->state);
9511         /* Make sure bnxt_close_nic() sees that we are reading stats before
9512          * we check the BNXT_STATE_OPEN flag.
9513          */
9514         smp_mb__after_atomic();
9515         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9516                 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9517                 *stats = bp->net_stats_prev;
9518                 return;
9519         }
9520
9521         bnxt_get_ring_stats(bp, stats);
9522         bnxt_add_prev_stats(bp, stats);
9523
9524         if (bp->flags & BNXT_FLAG_PORT_STATS) {
9525                 struct rx_port_stats *rx = bp->hw_rx_port_stats;
9526                 struct tx_port_stats *tx = bp->hw_tx_port_stats;
9527
9528                 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
9529                 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
9530                 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
9531                                           le64_to_cpu(rx->rx_ovrsz_frames) +
9532                                           le64_to_cpu(rx->rx_runt_frames);
9533                 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
9534                                    le64_to_cpu(rx->rx_jbr_frames);
9535                 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
9536                 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
9537                 stats->tx_errors = le64_to_cpu(tx->tx_err);
9538         }
9539         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
9540 }
9541
9542 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
9543 {
9544         struct net_device *dev = bp->dev;
9545         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9546         struct netdev_hw_addr *ha;
9547         u8 *haddr;
9548         int mc_count = 0;
9549         bool update = false;
9550         int off = 0;
9551
9552         netdev_for_each_mc_addr(ha, dev) {
9553                 if (mc_count >= BNXT_MAX_MC_ADDRS) {
9554                         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9555                         vnic->mc_list_count = 0;
9556                         return false;
9557                 }
9558                 haddr = ha->addr;
9559                 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
9560                         memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
9561                         update = true;
9562                 }
9563                 off += ETH_ALEN;
9564                 mc_count++;
9565         }
9566         if (mc_count)
9567                 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
9568
9569         if (mc_count != vnic->mc_list_count) {
9570                 vnic->mc_list_count = mc_count;
9571                 update = true;
9572         }
9573         return update;
9574 }
9575
9576 static bool bnxt_uc_list_updated(struct bnxt *bp)
9577 {
9578         struct net_device *dev = bp->dev;
9579         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9580         struct netdev_hw_addr *ha;
9581         int off = 0;
9582
9583         if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
9584                 return true;
9585
9586         netdev_for_each_uc_addr(ha, dev) {
9587                 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
9588                         return true;
9589
9590                 off += ETH_ALEN;
9591         }
9592         return false;
9593 }
9594
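/* ndo_set_rx_mode handler.  This runs in atomic context, so it only
 * recomputes the desired RX mask and the unicast/multicast address lists
 * and, if anything changed, defers the actual firmware configuration to
 * bnxt_sp_task() (BNXT_RX_MASK_SP_EVENT), which calls bnxt_cfg_rx_mode().
 */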
9595 static void bnxt_set_rx_mode(struct net_device *dev)
9596 {
9597         struct bnxt *bp = netdev_priv(dev);
9598         struct bnxt_vnic_info *vnic;
9599         bool mc_update = false;
9600         bool uc_update;
9601         u32 mask;
9602
9603         if (!test_bit(BNXT_STATE_OPEN, &bp->state))
9604                 return;
9605
9606         vnic = &bp->vnic_info[0];
9607         mask = vnic->rx_mask;
9608         mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
9609                   CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
9610                   CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
9611                   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
9612
9613         if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
9614                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9615
9616         uc_update = bnxt_uc_list_updated(bp);
9617
9618         if (dev->flags & IFF_BROADCAST)
9619                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
9620         if (dev->flags & IFF_ALLMULTI) {
9621                 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9622                 vnic->mc_list_count = 0;
9623         } else {
9624                 mc_update = bnxt_mc_list_updated(bp, &mask);
9625         }
9626
9627         if (mask != vnic->rx_mask || uc_update || mc_update) {
9628                 vnic->rx_mask = mask;
9629
9630                 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
9631                 bnxt_queue_sp_work(bp);
9632         }
9633 }
9634
9635 static int bnxt_cfg_rx_mode(struct bnxt *bp)
9636 {
9637         struct net_device *dev = bp->dev;
9638         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9639         struct netdev_hw_addr *ha;
9640         int i, off = 0, rc;
9641         bool uc_update;
9642
9643         netif_addr_lock_bh(dev);
9644         uc_update = bnxt_uc_list_updated(bp);
9645         netif_addr_unlock_bh(dev);
9646
9647         if (!uc_update)
9648                 goto skip_uc;
9649
9650         mutex_lock(&bp->hwrm_cmd_lock);
9651         for (i = 1; i < vnic->uc_filter_count; i++) {
9652                 struct hwrm_cfa_l2_filter_free_input req = {0};
9653
9654                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
9655                                        -1);
9656
9657                 req.l2_filter_id = vnic->fw_l2_filter_id[i];
9658
9659                 rc = _hwrm_send_message(bp, &req, sizeof(req),
9660                                         HWRM_CMD_TIMEOUT);
9661         }
9662         mutex_unlock(&bp->hwrm_cmd_lock);
9663
9664         vnic->uc_filter_count = 1;
9665
9666         netif_addr_lock_bh(dev);
9667         if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
9668                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
9669         } else {
9670                 netdev_for_each_uc_addr(ha, dev) {
9671                         memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
9672                         off += ETH_ALEN;
9673                         vnic->uc_filter_count++;
9674                 }
9675         }
9676         netif_addr_unlock_bh(dev);
9677
9678         for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
9679                 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
9680                 if (rc) {
9681                         netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
9682                                    rc);
9683                         vnic->uc_filter_count = i;
9684                         return rc;
9685                 }
9686         }
9687
9688 skip_uc:
9689         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
9690         if (rc && vnic->mc_list_count) {
9691                 netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
9692                             rc);
9693                 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
9694                 vnic->mc_list_count = 0;
9695                 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
9696         }
9697         if (rc)
9698                 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
9699                            rc);
9700
9701         return rc;
9702 }
9703
9704 static bool bnxt_can_reserve_rings(struct bnxt *bp)
9705 {
9706 #ifdef CONFIG_BNXT_SRIOV
9707         if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
9708                 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9709
9710                 /* If no minimum rings were provisioned by the PF, don't
9711                  * reserve rings by default when the device is down.
9712                  */
9713                 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
9714                         return true;
9715
9716                 if (!netif_running(bp->dev))
9717                         return false;
9718         }
9719 #endif
9720         return true;
9721 }
9722
9723 /* If the chip and firmware support RFS */
9724 static bool bnxt_rfs_supported(struct bnxt *bp)
9725 {
9726         if (bp->flags & BNXT_FLAG_CHIP_P5) {
9727                 if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
9728                         return true;
9729                 return false;
9730         }
9731         if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
9732                 return true;
9733         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9734                 return true;
9735         return false;
9736 }
9737
9738 /* If runtime conditions support RFS */
9739 static bool bnxt_rfs_capable(struct bnxt *bp)
9740 {
9741 #ifdef CONFIG_RFS_ACCEL
9742         int vnics, max_vnics, max_rss_ctxs;
9743
9744         if (bp->flags & BNXT_FLAG_CHIP_P5)
9745                 return bnxt_rfs_supported(bp);
9746         if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
9747                 return false;
9748
9749         vnics = 1 + bp->rx_nr_rings;
9750         max_vnics = bnxt_get_max_func_vnics(bp);
9751         max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
9752
9753         /* RSS contexts not a limiting factor */
9754         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
9755                 max_rss_ctxs = max_vnics;
9756         if (vnics > max_vnics || vnics > max_rss_ctxs) {
9757                 if (bp->rx_nr_rings > 1)
9758                         netdev_warn(bp->dev,
9759                                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
9760                                     min(max_rss_ctxs - 1, max_vnics - 1));
9761                 return false;
9762         }
9763
9764         if (!BNXT_NEW_RM(bp))
9765                 return true;
9766
9767         if (vnics == bp->hw_resc.resv_vnics)
9768                 return true;
9769
9770         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
9771         if (vnics <= bp->hw_resc.resv_vnics)
9772                 return true;
9773
9774         netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
9775         bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
9776         return false;
9777 #else
9778         return false;
9779 #endif
9780 }
9781
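/* Resolve feature dependencies: NTUPLE is only kept if aRFS is currently
 * usable, hardware GRO requires GRO and is mutually exclusive with LRO,
 * aggregation rings are required for LRO/GRO_HW, and CTAG/STAG RX VLAN
 * acceleration must be enabled or disabled together.
 */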
9782 static netdev_features_t bnxt_fix_features(struct net_device *dev,
9783                                            netdev_features_t features)
9784 {
9785         struct bnxt *bp = netdev_priv(dev);
9786
9787         if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
9788                 features &= ~NETIF_F_NTUPLE;
9789
9790         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9791                 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
9792
9793         if (!(features & NETIF_F_GRO))
9794                 features &= ~NETIF_F_GRO_HW;
9795
9796         if (features & NETIF_F_GRO_HW)
9797                 features &= ~NETIF_F_LRO;
9798
9799         /* Both CTAG and STAG VLAN acceleration on the RX side have to be
9800          * turned on or off together.
9801          */
9802         if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
9803             (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
9804                 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
9805                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9806                                       NETIF_F_HW_VLAN_STAG_RX);
9807                 else
9808                         features |= NETIF_F_HW_VLAN_CTAG_RX |
9809                                     NETIF_F_HW_VLAN_STAG_RX;
9810         }
9811 #ifdef CONFIG_BNXT_SRIOV
9812         if (BNXT_VF(bp)) {
9813                 if (bp->vf.vlan) {
9814                         features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
9815                                       NETIF_F_HW_VLAN_STAG_RX);
9816                 }
9817         }
9818 #endif
9819         return features;
9820 }
9821
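/* Apply the new feature set.  Switching between LRO and hardware GRO with
 * TPA remaining enabled can be handled by bnxt_set_tpa() while the device
 * stays up (except on P5 chips); enabling or disabling TPA altogether, or
 * changing any other configuration flag, requires a full close/open.
 */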
9822 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
9823 {
9824         struct bnxt *bp = netdev_priv(dev);
9825         u32 flags = bp->flags;
9826         u32 changes;
9827         int rc = 0;
9828         bool re_init = false;
9829         bool update_tpa = false;
9830
9831         flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
9832         if (features & NETIF_F_GRO_HW)
9833                 flags |= BNXT_FLAG_GRO;
9834         else if (features & NETIF_F_LRO)
9835                 flags |= BNXT_FLAG_LRO;
9836
9837         if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
9838                 flags &= ~BNXT_FLAG_TPA;
9839
9840         if (features & NETIF_F_HW_VLAN_CTAG_RX)
9841                 flags |= BNXT_FLAG_STRIP_VLAN;
9842
9843         if (features & NETIF_F_NTUPLE)
9844                 flags |= BNXT_FLAG_RFS;
9845
9846         changes = flags ^ bp->flags;
9847         if (changes & BNXT_FLAG_TPA) {
9848                 update_tpa = true;
9849                 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
9850                     (flags & BNXT_FLAG_TPA) == 0 ||
9851                     (bp->flags & BNXT_FLAG_CHIP_P5))
9852                         re_init = true;
9853         }
9854
9855         if (changes & ~BNXT_FLAG_TPA)
9856                 re_init = true;
9857
9858         if (flags != bp->flags) {
9859                 u32 old_flags = bp->flags;
9860
9861                 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
9862                         bp->flags = flags;
9863                         if (update_tpa)
9864                                 bnxt_set_ring_params(bp);
9865                         return rc;
9866                 }
9867
9868                 if (re_init) {
9869                         bnxt_close_nic(bp, false, false);
9870                         bp->flags = flags;
9871                         if (update_tpa)
9872                                 bnxt_set_ring_params(bp);
9873
9874                         return bnxt_open_nic(bp, false, false);
9875                 }
9876                 if (update_tpa) {
9877                         bp->flags = flags;
9878                         rc = bnxt_set_tpa(bp,
9879                                           (flags & BNXT_FLAG_TPA) ?
9880                                           true : false);
9881                         if (rc)
9882                                 bp->flags = old_flags;
9883                 }
9884         }
9885         return rc;
9886 }
9887
9888 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
9889                                        u32 ring_id, u32 *prod, u32 *cons)
9890 {
9891         struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
9892         struct hwrm_dbg_ring_info_get_input req = {0};
9893         int rc;
9894
9895         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
9896         req.ring_type = ring_type;
9897         req.fw_ring_id = cpu_to_le32(ring_id);
9898         mutex_lock(&bp->hwrm_cmd_lock);
9899         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
9900         if (!rc) {
9901                 *prod = le32_to_cpu(resp->producer_index);
9902                 *cons = le32_to_cpu(resp->consumer_index);
9903         }
9904         mutex_unlock(&bp->hwrm_cmd_lock);
9905         return rc;
9906 }
9907
9908 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
9909 {
9910         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
9911         int i = bnapi->index;
9912
9913         if (!txr)
9914                 return;
9915
9916         netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
9917                     i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
9918                     txr->tx_cons);
9919 }
9920
9921 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
9922 {
9923         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
9924         int i = bnapi->index;
9925
9926         if (!rxr)
9927                 return;
9928
9929         netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
9930                     i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
9931                     rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
9932                     rxr->rx_sw_agg_prod);
9933 }
9934
9935 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
9936 {
9937         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
9938         int i = bnapi->index;
9939
9940         netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
9941                     i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
9942 }
9943
9944 static void bnxt_dbg_dump_states(struct bnxt *bp)
9945 {
9946         int i;
9947         struct bnxt_napi *bnapi;
9948
9949         for (i = 0; i < bp->cp_nr_rings; i++) {
9950                 bnapi = bp->bnapi[i];
9951                 if (netif_msg_drv(bp)) {
9952                         bnxt_dump_tx_sw_state(bnapi);
9953                         bnxt_dump_rx_sw_state(bnapi);
9954                         bnxt_dump_cp_sw_state(bnapi);
9955                 }
9956         }
9957 }
9958
9959 static void bnxt_reset_task(struct bnxt *bp, bool silent)
9960 {
9961         if (!silent)
9962                 bnxt_dbg_dump_states(bp);
9963         if (netif_running(bp->dev)) {
9964                 int rc;
9965
9966                 if (silent) {
9967                         bnxt_close_nic(bp, false, false);
9968                         bnxt_open_nic(bp, false, false);
9969                 } else {
9970                         bnxt_ulp_stop(bp);
9971                         bnxt_close_nic(bp, true, false);
9972                         rc = bnxt_open_nic(bp, true, false);
9973                         bnxt_ulp_start(bp, rc);
9974                 }
9975         }
9976 }
9977
9978 static void bnxt_tx_timeout(struct net_device *dev)
9979 {
9980         struct bnxt *bp = netdev_priv(dev);
9981
9982         netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
9983         set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
9984         bnxt_queue_sp_work(bp);
9985 }
9986
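/* Called from bnxt_timer() when error recovery is supported.  Roughly every
 * tmr_multiplier timer ticks, read the firmware heartbeat and reset counter
 * registers; a stalled heartbeat or an unexpected reset count change
 * schedules BNXT_FW_EXCEPTION_SP_EVENT to start error recovery.
 */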
9987 static void bnxt_fw_health_check(struct bnxt *bp)
9988 {
9989         struct bnxt_fw_health *fw_health = bp->fw_health;
9990         u32 val;
9991
9992         if (!fw_health || !fw_health->enabled ||
9993             test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
9994                 return;
9995
9996         if (fw_health->tmr_counter) {
9997                 fw_health->tmr_counter--;
9998                 return;
9999         }
10000
10001         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10002         if (val == fw_health->last_fw_heartbeat)
10003                 goto fw_reset;
10004
10005         fw_health->last_fw_heartbeat = val;
10006
10007         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10008         if (val != fw_health->last_fw_reset_cnt)
10009                 goto fw_reset;
10010
10011         fw_health->tmr_counter = fw_health->tmr_multiplier;
10012         return;
10013
10014 fw_reset:
10015         set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
10016         bnxt_queue_sp_work(bp);
10017 }
10018
10019 static void bnxt_timer(struct timer_list *t)
10020 {
10021         struct bnxt *bp = from_timer(bp, t, timer);
10022         struct net_device *dev = bp->dev;
10023
10024         if (!netif_running(dev))
10025                 return;
10026
10027         if (atomic_read(&bp->intr_sem) != 0)
10028                 goto bnxt_restart_timer;
10029
10030         if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
10031                 bnxt_fw_health_check(bp);
10032
10033         if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
10034             bp->stats_coal_ticks) {
10035                 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
10036                 bnxt_queue_sp_work(bp);
10037         }
10038
10039         if (bnxt_tc_flower_enabled(bp)) {
10040                 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
10041                 bnxt_queue_sp_work(bp);
10042         }
10043
10044         if (bp->link_info.phy_retry) {
10045                 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
10046                         bp->link_info.phy_retry = false;
10047                         netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
10048                 } else {
10049                         set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
10050                         bnxt_queue_sp_work(bp);
10051                 }
10052         }
10053
10054         if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
10055                 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
10056                 bnxt_queue_sp_work(bp);
10057         }
10058 bnxt_restart_timer:
10059         mod_timer(&bp->timer, jiffies + bp->current_interval);
10060 }
10061
10062 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
10063 {
10064         /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
10065          * set.  If the device is being closed, bnxt_close() may be holding
10066          * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
10067          * must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
10068          */
10069         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10070         rtnl_lock();
10071 }
10072
10073 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
10074 {
10075         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10076         rtnl_unlock();
10077 }
10078
10079 /* Only called from bnxt_sp_task() */
10080 static void bnxt_reset(struct bnxt *bp, bool silent)
10081 {
10082         bnxt_rtnl_lock_sp(bp);
10083         if (test_bit(BNXT_STATE_OPEN, &bp->state))
10084                 bnxt_reset_task(bp, silent);
10085         bnxt_rtnl_unlock_sp(bp);
10086 }
10087
10088 static void bnxt_fw_reset_close(struct bnxt *bp)
10089 {
10090         bnxt_ulp_stop(bp);
10091         __bnxt_close_nic(bp, true, false);
10092         bnxt_clear_int_mode(bp);
10093         bnxt_hwrm_func_drv_unrgtr(bp);
10094         bnxt_free_ctx_mem(bp);
10095         kfree(bp->ctx);
10096         bp->ctx = NULL;
10097 }
10098
10099 static bool is_bnxt_fw_ok(struct bnxt *bp)
10100 {
10101         struct bnxt_fw_health *fw_health = bp->fw_health;
10102         bool no_heartbeat = false, has_reset = false;
10103         u32 val;
10104
10105         val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
10106         if (val == fw_health->last_fw_heartbeat)
10107                 no_heartbeat = true;
10108
10109         val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
10110         if (val != fw_health->last_fw_reset_cnt)
10111                 has_reset = true;
10112
10113         if (!no_heartbeat && has_reset)
10114                 return true;
10115
10116         return false;
10117 }
10118
10119 /* rtnl_lock is acquired before calling this function */
10120 static void bnxt_force_fw_reset(struct bnxt *bp)
10121 {
10122         struct bnxt_fw_health *fw_health = bp->fw_health;
10123         u32 wait_dsecs;
10124
10125         if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
10126             test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10127                 return;
10128
10129         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10130         bnxt_fw_reset_close(bp);
10131         wait_dsecs = fw_health->master_func_wait_dsecs;
10132         if (fw_health->master) {
10133                 if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
10134                         wait_dsecs = 0;
10135                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10136         } else {
10137                 bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
10138                 wait_dsecs = fw_health->normal_func_wait_dsecs;
10139                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10140         }
10141
10142         bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
10143         bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
10144         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10145 }
10146
10147 void bnxt_fw_exception(struct bnxt *bp)
10148 {
10149         netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
10150         set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10151         bnxt_rtnl_lock_sp(bp);
10152         bnxt_force_fw_reset(bp);
10153         bnxt_rtnl_unlock_sp(bp);
10154 }
10155
10156 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
10157  * < 0 on error.
10158  */
10159 static int bnxt_get_registered_vfs(struct bnxt *bp)
10160 {
10161 #ifdef CONFIG_BNXT_SRIOV
10162         int rc;
10163
10164         if (!BNXT_PF(bp))
10165                 return 0;
10166
10167         rc = bnxt_hwrm_func_qcfg(bp);
10168         if (rc) {
10169                 netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
10170                 return rc;
10171         }
10172         if (bp->pf.registered_vfs)
10173                 return bp->pf.registered_vfs;
10174         if (bp->sriov_cfg)
10175                 return 1;
10176 #endif
10177         return 0;
10178 }
10179
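/* Begin an orderly firmware reset.  If active VFs are still registered
 * (and the firmware is not already in a fatal state), the reset task
 * first polls for them to unregister (BNXT_FW_RESET_STATE_POLL_VF);
 * otherwise the device is closed here and bnxt_fw_reset_task() is
 * scheduled to drive the rest of the reset state machine.
 */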
10180 void bnxt_fw_reset(struct bnxt *bp)
10181 {
10182         bnxt_rtnl_lock_sp(bp);
10183         if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
10184             !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10185                 int n = 0, tmo;
10186
10187                 set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10188                 if (bp->pf.active_vfs &&
10189                     !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
10190                         n = bnxt_get_registered_vfs(bp);
10191                 if (n < 0) {
10192                         netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
10193                                    n);
10194                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10195                         dev_close(bp->dev);
10196                         goto fw_reset_exit;
10197                 } else if (n > 0) {
10198                         u16 vf_tmo_dsecs = n * 10;
10199
10200                         if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
10201                                 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
10202                         bp->fw_reset_state =
10203                                 BNXT_FW_RESET_STATE_POLL_VF;
10204                         bnxt_queue_fw_reset_work(bp, HZ / 10);
10205                         goto fw_reset_exit;
10206                 }
10207                 bnxt_fw_reset_close(bp);
10208                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10209                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
10210                         tmo = HZ / 10;
10211                 } else {
10212                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10213                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
10214                 }
10215                 bnxt_queue_fw_reset_work(bp, tmo);
10216         }
10217 fw_reset_exit:
10218         bnxt_rtnl_unlock_sp(bp);
10219 }
10220
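/* P5 chips only: if a completion ring has unprocessed entries but its
 * raw consumer index has not moved since the previous check, assume a
 * completion notification was missed, query the ring state from firmware
 * for diagnostics, and bump the missed_irqs counter.
 */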
10221 static void bnxt_chk_missed_irq(struct bnxt *bp)
10222 {
10223         int i;
10224
10225         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
10226                 return;
10227
10228         for (i = 0; i < bp->cp_nr_rings; i++) {
10229                 struct bnxt_napi *bnapi = bp->bnapi[i];
10230                 struct bnxt_cp_ring_info *cpr;
10231                 u32 fw_ring_id;
10232                 int j;
10233
10234                 if (!bnapi)
10235                         continue;
10236
10237                 cpr = &bnapi->cp_ring;
10238                 for (j = 0; j < 2; j++) {
10239                         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
10240                         u32 val[2];
10241
10242                         if (!cpr2 || cpr2->has_more_work ||
10243                             !bnxt_has_work(bp, cpr2))
10244                                 continue;
10245
10246                         if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
10247                                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
10248                                 continue;
10249                         }
10250                         fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
10251                         bnxt_dbg_hwrm_ring_info_get(bp,
10252                                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
10253                                 fw_ring_id, &val[0], &val[1]);
10254                         cpr->missed_irqs++;
10255                 }
10256         }
10257 }
10258
10259 static void bnxt_cfg_ntp_filters(struct bnxt *);
10260
10261 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
10262 {
10263         struct bnxt_link_info *link_info = &bp->link_info;
10264
10265         if (BNXT_AUTO_MODE(link_info->auto_mode)) {
10266                 link_info->autoneg = BNXT_AUTONEG_SPEED;
10267                 if (bp->hwrm_spec_code >= 0x10201) {
10268                         if (link_info->auto_pause_setting &
10269                             PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
10270                                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10271                 } else {
10272                         link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
10273                 }
10274                 link_info->advertising = link_info->auto_link_speeds;
10275         } else {
10276                 link_info->req_link_speed = link_info->force_link_speed;
10277                 link_info->req_duplex = link_info->duplex_setting;
10278         }
10279         if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
10280                 link_info->req_flow_ctrl =
10281                         link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
10282         else
10283                 link_info->req_flow_ctrl = link_info->force_pause_setting;
10284 }
10285
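/* Workqueue handler for all deferred slow-path events flagged in
 * bp->sp_event.  BNXT_STATE_IN_SP_TASK is set while it runs so that
 * bnxt_close_nic() can wait for it; the reset handlers at the end drop
 * that bit themselves (via bnxt_rtnl_lock_sp()) before taking rtnl_lock.
 */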
10286 static void bnxt_sp_task(struct work_struct *work)
10287 {
10288         struct bnxt *bp = container_of(work, struct bnxt, sp_task);
10289
10290         set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10291         smp_mb__after_atomic();
10292         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10293                 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10294                 return;
10295         }
10296
10297         if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
10298                 bnxt_cfg_rx_mode(bp);
10299
10300         if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
10301                 bnxt_cfg_ntp_filters(bp);
10302         if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
10303                 bnxt_hwrm_exec_fwd_req(bp);
10304         if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
10305                 bnxt_hwrm_tunnel_dst_port_alloc(
10306                         bp, bp->vxlan_port,
10307                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10308         }
10309         if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
10310                 bnxt_hwrm_tunnel_dst_port_free(
10311                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
10312         }
10313         if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
10314                 bnxt_hwrm_tunnel_dst_port_alloc(
10315                         bp, bp->nge_port,
10316                         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10317         }
10318         if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
10319                 bnxt_hwrm_tunnel_dst_port_free(
10320                         bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
10321         }
10322         if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
10323                 bnxt_hwrm_port_qstats(bp);
10324                 bnxt_hwrm_port_qstats_ext(bp);
10325                 bnxt_hwrm_pcie_qstats(bp);
10326         }
10327
10328         if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
10329                 int rc;
10330
10331                 mutex_lock(&bp->link_lock);
10332                 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
10333                                        &bp->sp_event))
10334                         bnxt_hwrm_phy_qcaps(bp);
10335
10336                 if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
10337                                        &bp->sp_event))
10338                         bnxt_init_ethtool_link_settings(bp);
10339
10340                 rc = bnxt_update_link(bp, true);
10341                 mutex_unlock(&bp->link_lock);
10342                 if (rc)
10343                         netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
10344                                    rc);
10345         }
10346         if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
10347                 int rc;
10348
10349                 mutex_lock(&bp->link_lock);
10350                 rc = bnxt_update_phy_setting(bp);
10351                 mutex_unlock(&bp->link_lock);
10352                 if (rc) {
10353                         netdev_warn(bp->dev, "update phy settings retry failed\n");
10354                 } else {
10355                         bp->link_info.phy_retry = false;
10356                         netdev_info(bp->dev, "update phy settings retry succeeded\n");
10357                 }
10358         }
10359         if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
10360                 mutex_lock(&bp->link_lock);
10361                 bnxt_get_port_module_status(bp);
10362                 mutex_unlock(&bp->link_lock);
10363         }
10364
10365         if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
10366                 bnxt_tc_flow_stats_work(bp);
10367
10368         if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
10369                 bnxt_chk_missed_irq(bp);
10370
10371         /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
10372          * must be the last functions to be called before exiting.
10373          */
10374         if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
10375                 bnxt_reset(bp, false);
10376
10377         if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
10378                 bnxt_reset(bp, true);
10379
10380         if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
10381                 bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
10382
10383         if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
10384                 if (!is_bnxt_fw_ok(bp))
10385                         bnxt_devlink_health_report(bp,
10386                                                    BNXT_FW_EXCEPTION_SP_EVENT);
10387         }
10388
10389         smp_mb__before_atomic();
10390         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
10391 }
10392
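/* Check whether the requested TX/RX ring configuration can be supported.
 * TX rings needed = tx * TCs + XDP rings; the RX ring count doubles when
 * aggregation rings are used; completion rings are max(tx, rx) when
 * shared or tx + rx otherwise; one VNIC per RX ring is added on top of
 * the default VNIC when NTUPLE filtering is enabled on non-P5 chips.
 * The totals are then verified with firmware via bnxt_hwrm_check_rings().
 */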
10393 /* Under rtnl_lock */
10394 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
10395                      int tx_xdp)
10396 {
10397         int max_rx, max_tx, tx_sets = 1;
10398         int tx_rings_needed, stats;
10399         int rx_rings = rx;
10400         int cp, vnics, rc;
10401
10402         if (tcs)
10403                 tx_sets = tcs;
10404
10405         rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
10406         if (rc)
10407                 return rc;
10408
10409         if (max_rx < rx)
10410                 return -ENOMEM;
10411
10412         tx_rings_needed = tx * tx_sets + tx_xdp;
10413         if (max_tx < tx_rings_needed)
10414                 return -ENOMEM;
10415
10416         vnics = 1;
10417         if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
10418                 vnics += rx_rings;
10419
10420         if (bp->flags & BNXT_FLAG_AGG_RINGS)
10421                 rx_rings <<= 1;
10422         cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
10423         stats = cp;
10424         if (BNXT_NEW_RM(bp)) {
10425                 cp += bnxt_get_ulp_msix_num(bp);
10426                 stats += bnxt_get_ulp_stat_ctxs(bp);
10427         }
10428         return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
10429                                      stats, vnics);
10430 }
10431
10432 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
10433 {
10434         if (bp->bar2) {
10435                 pci_iounmap(pdev, bp->bar2);
10436                 bp->bar2 = NULL;
10437         }
10438
10439         if (bp->bar1) {
10440                 pci_iounmap(pdev, bp->bar1);
10441                 bp->bar1 = NULL;
10442         }
10443
10444         if (bp->bar0) {
10445                 pci_iounmap(pdev, bp->bar0);
10446                 bp->bar0 = NULL;
10447         }
10448 }
10449
10450 static void bnxt_cleanup_pci(struct bnxt *bp)
10451 {
10452         bnxt_unmap_bars(bp, bp->pdev);
10453         pci_release_regions(bp->pdev);
10454         if (pci_is_enabled(bp->pdev))
10455                 pci_disable_device(bp->pdev);
10456 }
10457
10458 static void bnxt_init_dflt_coal(struct bnxt *bp)
10459 {
10460         struct bnxt_coal *coal;
10461
10462         /* Tick values in microseconds.
10463          * 1 coal_buf x bufs_per_record = 1 completion record.
10464          */
10465         coal = &bp->rx_coal;
10466         coal->coal_ticks = 10;
10467         coal->coal_bufs = 30;
10468         coal->coal_ticks_irq = 1;
10469         coal->coal_bufs_irq = 2;
10470         coal->idle_thresh = 50;
10471         coal->bufs_per_record = 2;
10472         coal->budget = 64;              /* NAPI budget */
10473
10474         coal = &bp->tx_coal;
10475         coal->coal_ticks = 28;
10476         coal->coal_bufs = 30;
10477         coal->coal_ticks_irq = 2;
10478         coal->coal_bufs_irq = 2;
10479         coal->bufs_per_record = 1;
10480
10481         bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
10482 }
10483
10484 static void bnxt_alloc_fw_health(struct bnxt *bp)
10485 {
10486         if (bp->fw_health)
10487                 return;
10488
10489         if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
10490             !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
10491                 return;
10492
10493         bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
10494         if (!bp->fw_health) {
10495                 netdev_warn(bp->dev, "Failed to allocate fw_health\n");
10496                 bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
10497                 bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
10498         }
10499 }
10500
10501 static int bnxt_fw_init_one_p1(struct bnxt *bp)
10502 {
10503         int rc;
10504
10505         bp->fw_cap = 0;
10506         rc = bnxt_hwrm_ver_get(bp);
10507         if (rc)
10508                 return rc;
10509
10510         if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
10511                 rc = bnxt_alloc_kong_hwrm_resources(bp);
10512                 if (rc)
10513                         bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL;
10514         }
10515
10516         if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10517             bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
10518                 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10519                 if (rc)
10520                         return rc;
10521         }
10522         rc = bnxt_hwrm_func_reset(bp);
10523         if (rc)
10524                 return -ENODEV;
10525
10526         bnxt_hwrm_fw_set_time(bp);
10527         return 0;
10528 }
10529
10530 static int bnxt_fw_init_one_p2(struct bnxt *bp)
10531 {
10532         int rc;
10533
10534         /* Get the MAX capabilities for this function */
10535         rc = bnxt_hwrm_func_qcaps(bp);
10536         if (rc) {
10537                 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10538                            rc);
10539                 return -ENODEV;
10540         }
10541
10542         rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
10543         if (rc)
10544                 netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
10545                             rc);
10546
10547         bnxt_alloc_fw_health(bp);
10548         rc = bnxt_hwrm_error_recovery_qcfg(bp);
10549         if (rc)
10550                 netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
10551                             rc);
10552
10553         rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
10554         if (rc)
10555                 return -ENODEV;
10556
10557         bnxt_hwrm_func_qcfg(bp);
10558         bnxt_hwrm_vnic_qcaps(bp);
10559         bnxt_hwrm_port_led_qcaps(bp);
10560         bnxt_ethtool_init(bp);
10561         bnxt_dcb_init(bp);
10562         return 0;
10563 }
10564
10565 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
10566 {
10567         bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
10568         bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10569                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10570                            VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10571                            VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
10572         if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
10573                 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10574                 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10575                                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10576         }
10577 }
10578
10579 static void bnxt_set_dflt_rfs(struct bnxt *bp)
10580 {
10581         struct net_device *dev = bp->dev;
10582
10583         dev->hw_features &= ~NETIF_F_NTUPLE;
10584         dev->features &= ~NETIF_F_NTUPLE;
10585         bp->flags &= ~BNXT_FLAG_RFS;
10586         if (bnxt_rfs_supported(bp)) {
10587                 dev->hw_features |= NETIF_F_NTUPLE;
10588                 if (bnxt_rfs_capable(bp)) {
10589                         bp->flags |= BNXT_FLAG_RFS;
10590                         dev->features |= NETIF_F_NTUPLE;
10591                 }
10592         }
10593 }
10594
10595 static void bnxt_fw_init_one_p3(struct bnxt *bp)
10596 {
10597         struct pci_dev *pdev = bp->pdev;
10598
10599         bnxt_set_dflt_rss_hash_type(bp);
10600         bnxt_set_dflt_rfs(bp);
10601
10602         bnxt_get_wol_settings(bp);
10603         if (bp->flags & BNXT_FLAG_WOL_CAP)
10604                 device_set_wakeup_enable(&pdev->dev, bp->wol);
10605         else
10606                 device_set_wakeup_capable(&pdev->dev, false);
10607
10608         bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
10609         bnxt_hwrm_coal_params_qcaps(bp);
10610 }
10611
10612 static int bnxt_fw_init_one(struct bnxt *bp)
10613 {
10614         int rc;
10615
10616         rc = bnxt_fw_init_one_p1(bp);
10617         if (rc) {
10618                 netdev_err(bp->dev, "Firmware init phase 1 failed\n");
10619                 return rc;
10620         }
10621         rc = bnxt_fw_init_one_p2(bp);
10622         if (rc) {
10623                 netdev_err(bp->dev, "Firmware init phase 2 failed\n");
10624                 return rc;
10625         }
10626         rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
10627         if (rc)
10628                 return rc;
10629         bnxt_fw_init_one_p3(bp);
10630         return 0;
10631 }
10632
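/* Execute one step of the firmware-provided reset register sequence.
 * The register descriptor encodes where to write: PCI config space, a
 * GRC register accessed through the BAR0 window at offset 0x2000, or a
 * direct BAR0/BAR1 offset.  An optional delay follows the write, with a
 * config-space read first to flush the posted write.
 */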
10633 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
10634 {
10635         struct bnxt_fw_health *fw_health = bp->fw_health;
10636         u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
10637         u32 val = fw_health->fw_reset_seq_vals[reg_idx];
10638         u32 reg_type, reg_off, delay_msecs;
10639
10640         delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
10641         reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
10642         reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
10643         switch (reg_type) {
10644         case BNXT_FW_HEALTH_REG_TYPE_CFG:
10645                 pci_write_config_dword(bp->pdev, reg_off, val);
10646                 break;
10647         case BNXT_FW_HEALTH_REG_TYPE_GRC:
10648                 writel(reg_off & BNXT_GRC_BASE_MASK,
10649                        bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
10650                 reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
10651                 /* fall through */
10652         case BNXT_FW_HEALTH_REG_TYPE_BAR0:
10653                 writel(val, bp->bar0 + reg_off);
10654                 break;
10655         case BNXT_FW_HEALTH_REG_TYPE_BAR1:
10656                 writel(val, bp->bar1 + reg_off);
10657                 break;
10658         }
10659         if (delay_msecs) {
10660                 pci_read_config_dword(bp->pdev, 0, &val);
10661                 msleep(delay_msecs);
10662         }
10663 }
10664
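/* Perform the actual firmware reset using whichever mechanism the
 * firmware advertised: a firmware reload through the TEE service
 * (ERR_RECOVER_RELOAD, tee_bnxt_fw_load()), a host-driven register write
 * sequence (FLAGS_HOST), or a graceful HWRM_FW_RESET request to the
 * co-processor (FLAGS_CO_CPU).  fw_reset_timestamp marks when the reset
 * was issued.
 */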
10665 static void bnxt_reset_all(struct bnxt *bp)
10666 {
10667         struct bnxt_fw_health *fw_health = bp->fw_health;
10668         int i, rc;
10669
10670         if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10671 #ifdef CONFIG_TEE_BNXT_FW
10672                 rc = tee_bnxt_fw_load();
10673                 if (rc)
10674                         netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc);
10675                 bp->fw_reset_timestamp = jiffies;
10676 #endif
10677                 return;
10678         }
10679
10680         if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
10681                 for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
10682                         bnxt_fw_reset_writel(bp, i);
10683         } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
10684                 struct hwrm_fw_reset_input req = {0};
10685
10686                 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);
10687                 req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
10688                 req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
10689                 req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
10690                 req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
10691                 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
10692                 if (rc)
10693                         netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
10694         }
10695         bp->fw_reset_timestamp = jiffies;
10696 }
10697
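/* Delayed work that drives the firmware reset state machine.  Depending
 * on the reset type it moves through POLL_VF (wait for VFs to
 * unregister), POLL_FW_DOWN, RESET_FW, ENABLE_DEV (re-enable the PCI
 * device), POLL_FW (wait for firmware to answer HWRM_VER_GET) and
 * OPENING (re-open the netdev and restart ULPs), re-queueing itself
 * until each condition is met or fw_reset_max_dsecs expires.
 */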
10698 static void bnxt_fw_reset_task(struct work_struct *work)
10699 {
10700         struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
10701         int rc;
10702
10703         if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10704                 netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
10705                 return;
10706         }
10707
10708         switch (bp->fw_reset_state) {
10709         case BNXT_FW_RESET_STATE_POLL_VF: {
10710                 int n = bnxt_get_registered_vfs(bp);
10711                 int tmo;
10712
10713                 if (n < 0) {
10714                         netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
10715                                    n, jiffies_to_msecs(jiffies -
10716                                    bp->fw_reset_timestamp));
10717                         goto fw_reset_abort;
10718                 } else if (n > 0) {
10719                         if (time_after(jiffies, bp->fw_reset_timestamp +
10720                                        (bp->fw_reset_max_dsecs * HZ / 10))) {
10721                                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10722                                 bp->fw_reset_state = 0;
10723                                 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
10724                                            n);
10725                                 return;
10726                         }
10727                         bnxt_queue_fw_reset_work(bp, HZ / 10);
10728                         return;
10729                 }
10730                 bp->fw_reset_timestamp = jiffies;
10731                 rtnl_lock();
10732                 bnxt_fw_reset_close(bp);
10733                 if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
10734                         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
10735                         tmo = HZ / 10;
10736                 } else {
10737                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10738                         tmo = bp->fw_reset_min_dsecs * HZ / 10;
10739                 }
10740                 rtnl_unlock();
10741                 bnxt_queue_fw_reset_work(bp, tmo);
10742                 return;
10743         }
10744         case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
10745                 u32 val;
10746
10747                 val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
10748                 if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
10749                     !time_after(jiffies, bp->fw_reset_timestamp +
10750                     (bp->fw_reset_max_dsecs * HZ / 10))) {
10751                         bnxt_queue_fw_reset_work(bp, HZ / 5);
10752                         return;
10753                 }
10754
10755                 if (!bp->fw_health->master) {
10756                         u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
10757
10758                         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10759                         bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
10760                         return;
10761                 }
10762                 bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
10763         }
10764         /* fall through */
10765         case BNXT_FW_RESET_STATE_RESET_FW:
10766                 bnxt_reset_all(bp);
10767                 bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
10768                 bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
10769                 return;
10770         case BNXT_FW_RESET_STATE_ENABLE_DEV:
10771                 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
10772                     bp->fw_health) {
10773                         u32 val;
10774
10775                         val = bnxt_fw_health_readl(bp,
10776                                                    BNXT_FW_RESET_INPROG_REG);
10777                         if (val)
10778                                 netdev_warn(bp->dev, "FW reset inprog %x after min wait time.\n",
10779                                             val);
10780                 }
10781                 clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
10782                 if (pci_enable_device(bp->pdev)) {
10783                         netdev_err(bp->dev, "Cannot re-enable PCI device\n");
10784                         goto fw_reset_abort;
10785                 }
10786                 pci_set_master(bp->pdev);
10787                 bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
10788                 /* fall through */
10789         case BNXT_FW_RESET_STATE_POLL_FW:
10790                 bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
10791                 rc = __bnxt_hwrm_ver_get(bp, true);
10792                 if (rc) {
10793                         if (time_after(jiffies, bp->fw_reset_timestamp +
10794                                        (bp->fw_reset_max_dsecs * HZ / 10))) {
10795                                 netdev_err(bp->dev, "Firmware reset aborted\n");
10796                                 goto fw_reset_abort;
10797                         }
10798                         bnxt_queue_fw_reset_work(bp, HZ / 5);
10799                         return;
10800                 }
10801                 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
10802                 bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
10803                 /* fall through */
10804         case BNXT_FW_RESET_STATE_OPENING:
10805                 while (!rtnl_trylock()) {
10806                         bnxt_queue_fw_reset_work(bp, HZ / 10);
10807                         return;
10808                 }
10809                 rc = bnxt_open(bp->dev);
10810                 if (rc) {
10811                         netdev_err(bp->dev, "bnxt_open_nic() failed\n");
10812                         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10813                         dev_close(bp->dev);
10814                 }
10815
10816                 bp->fw_reset_state = 0;
10817                 /* Make sure fw_reset_state is 0 before clearing the flag */
10818                 smp_mb__before_atomic();
10819                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10820                 bnxt_ulp_start(bp, rc);
10821                 bnxt_dl_health_status_update(bp, true);
10822                 rtnl_unlock();
10823                 break;
10824         }
10825         return;
10826
10827 fw_reset_abort:
10828         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
10829         if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
10830                 bnxt_dl_health_status_update(bp, false);
10831         bp->fw_reset_state = 0;
10832         rtnl_lock();
10833         dev_close(bp->dev);
10834         rtnl_unlock();
10835 }
10836
10837 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
10838 {
10839         int rc;
10840         struct bnxt *bp = netdev_priv(dev);
10841
10842         SET_NETDEV_DEV(dev, &pdev->dev);
10843
10844         /* enable device (incl. PCI PM wakeup), and bus-mastering */
10845         rc = pci_enable_device(pdev);
10846         if (rc) {
10847                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
10848                 goto init_err;
10849         }
10850
10851         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10852                 dev_err(&pdev->dev,
10853                         "Cannot find PCI device base address, aborting\n");
10854                 rc = -ENODEV;
10855                 goto init_err_disable;
10856         }
10857
10858         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10859         if (rc) {
10860                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
10861                 goto init_err_disable;
10862         }
10863
10864         if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
10865             dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
10866                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
                      rc = -EIO;
10867                 goto init_err_disable;
10868         }
10869
10870         pci_set_master(pdev);
10871
10872         bp->dev = dev;
10873         bp->pdev = pdev;
10874
10875         bp->bar0 = pci_ioremap_bar(pdev, 0);
10876         if (!bp->bar0) {
10877                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
10878                 rc = -ENOMEM;
10879                 goto init_err_release;
10880         }
10881
10882         bp->bar1 = pci_ioremap_bar(pdev, 2);
10883         if (!bp->bar1) {
10884                 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
10885                 rc = -ENOMEM;
10886                 goto init_err_release;
10887         }
10888
10889         bp->bar2 = pci_ioremap_bar(pdev, 4);
10890         if (!bp->bar2) {
10891                 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
10892                 rc = -ENOMEM;
10893                 goto init_err_release;
10894         }
10895
10896         pci_enable_pcie_error_reporting(pdev);
10897
10898         INIT_WORK(&bp->sp_task, bnxt_sp_task);
10899         INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
10900
10901         spin_lock_init(&bp->ntp_fltr_lock);
10902 #if BITS_PER_LONG == 32
10903         spin_lock_init(&bp->db_lock);
10904 #endif
10905
10906         bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
10907         bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
10908
10909         bnxt_init_dflt_coal(bp);
10910
10911         timer_setup(&bp->timer, bnxt_timer, 0);
10912         bp->current_interval = BNXT_TIMER_INTERVAL;
10913
10914         clear_bit(BNXT_STATE_OPEN, &bp->state);
10915         return 0;
10916
10917 init_err_release:
10918         bnxt_unmap_bars(bp, pdev);
10919         pci_release_regions(pdev);
10920
10921 init_err_disable:
10922         pci_disable_device(pdev);
10923
10924 init_err:
10925         return rc;
10926 }
10927
10928 /* rtnl_lock held */
10929 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
10930 {
10931         struct sockaddr *addr = p;
10932         struct bnxt *bp = netdev_priv(dev);
10933         int rc = 0;
10934
10935         if (!is_valid_ether_addr(addr->sa_data))
10936                 return -EADDRNOTAVAIL;
10937
10938         if (ether_addr_equal(addr->sa_data, dev->dev_addr))
10939                 return 0;
10940
10941         rc = bnxt_approve_mac(bp, addr->sa_data, true);
10942         if (rc)
10943                 return rc;
10944
10945         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10946         if (netif_running(dev)) {
10947                 bnxt_close_nic(bp, false, false);
10948                 rc = bnxt_open_nic(bp, false, false);
10949         }
10950
10951         return rc;
10952 }
10953
10954 /* rtnl_lock held */
10955 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
10956 {
10957         struct bnxt *bp = netdev_priv(dev);
10958
10959         if (netif_running(dev))
10960                 bnxt_close_nic(bp, false, false);
10961
10962         dev->mtu = new_mtu;
10963         bnxt_set_ring_params(bp);
10964
10965         if (netif_running(dev))
10966                 return bnxt_open_nic(bp, false, false);
10967
10968         return 0;
10969 }
10970
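/* Configure the number of TX traffic classes (mqprio).  The request is
 * validated against max_tc and the available rings; the device is closed
 * and reopened (if running) so the rings can be re-allocated per TC.
 */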
10971 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
10972 {
10973         struct bnxt *bp = netdev_priv(dev);
10974         bool sh = false;
10975         int rc;
10976
10977         if (tc > bp->max_tc) {
10978                 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
10979                            tc, bp->max_tc);
10980                 return -EINVAL;
10981         }
10982
10983         if (netdev_get_num_tc(dev) == tc)
10984                 return 0;
10985
10986         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
10987                 sh = true;
10988
10989         rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
10990                               sh, tc, bp->tx_nr_rings_xdp);
10991         if (rc)
10992                 return rc;
10993
10994         /* Need to close the device and redo hw resource allocations */
10995         if (netif_running(bp->dev))
10996                 bnxt_close_nic(bp, true, false);
10997
10998         if (tc) {
10999                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
11000                 netdev_set_num_tc(dev, tc);
11001         } else {
11002                 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11003                 netdev_reset_tc(dev);
11004         }
11005         bp->tx_nr_rings += bp->tx_nr_rings_xdp;
11006         bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
11007                                bp->tx_nr_rings + bp->rx_nr_rings;
11008
11009         if (netif_running(bp->dev))
11010                 return bnxt_open_nic(bp, true, false);
11011
11012         return 0;
11013 }
11014
11015 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
11016                                   void *cb_priv)
11017 {
11018         struct bnxt *bp = cb_priv;
11019
11020         if (!bnxt_tc_flower_enabled(bp) ||
11021             !tc_cls_can_offload_and_chain0(bp->dev, type_data))
11022                 return -EOPNOTSUPP;
11023
11024         switch (type) {
11025         case TC_SETUP_CLSFLOWER:
11026                 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
11027         default:
11028                 return -EOPNOTSUPP;
11029         }
11030 }
11031
11032 LIST_HEAD(bnxt_block_cb_list);
11033
11034 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
11035                          void *type_data)
11036 {
11037         struct bnxt *bp = netdev_priv(dev);
11038
11039         switch (type) {
11040         case TC_SETUP_BLOCK:
11041                 return flow_block_cb_setup_simple(type_data,
11042                                                   &bnxt_block_cb_list,
11043                                                   bnxt_setup_tc_block_cb,
11044                                                   bp, bp, true);
11045         case TC_SETUP_QDISC_MQPRIO: {
11046                 struct tc_mqprio_qopt *mqprio = type_data;
11047
11048                 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
11049
11050                 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
11051         }
11052         default:
11053                 return -EOPNOTSUPP;
11054         }
11055 }
11056
11057 #ifdef CONFIG_RFS_ACCEL
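/* Two ntuple filters match when their dissected flow keys (addresses,
 * ports, protocol, flags) and their source/destination MAC addresses are
 * all identical.
 */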
11058 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
11059                             struct bnxt_ntuple_filter *f2)
11060 {
11061         struct flow_keys *keys1 = &f1->fkeys;
11062         struct flow_keys *keys2 = &f2->fkeys;
11063
11064         if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
11065             keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
11066             keys1->ports.ports == keys2->ports.ports &&
11067             keys1->basic.ip_proto == keys2->basic.ip_proto &&
11068             keys1->basic.n_proto == keys2->basic.n_proto &&
11069             keys1->control.flags == keys2->control.flags &&
11070             ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
11071             ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
11072                 return true;
11073
11074         return false;
11075 }
11076
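/* aRFS .ndo_rx_flow_steer handler: build an ntuple filter for the flow
 * carried by @skb, add it to the filter hash table, and kick the sp_task
 * to program it into the firmware.  Returns the software filter ID (0 if
 * an identical filter already exists) or a negative errno.
 */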
11077 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
11078                               u16 rxq_index, u32 flow_id)
11079 {
11080         struct bnxt *bp = netdev_priv(dev);
11081         struct bnxt_ntuple_filter *fltr, *new_fltr;
11082         struct flow_keys *fkeys;
11083         struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
11084         int rc = 0, idx, bit_id, l2_idx = 0;
11085         struct hlist_head *head;
11086
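        /* If the destination MAC is not the netdev's own address, look it
         * up in the default VNIC's unicast list so the new filter can
         * reference the matching L2 filter.
         */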
11087         if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
11088                 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11089                 int off = 0, j;
11090
11091                 netif_addr_lock_bh(dev);
11092                 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
11093                         if (ether_addr_equal(eth->h_dest,
11094                                              vnic->uc_list + off)) {
11095                                 l2_idx = j + 1;
11096                                 break;
11097                         }
11098                 }
11099                 netif_addr_unlock_bh(dev);
11100                 if (!l2_idx)
11101                         return -EINVAL;
11102         }
11103         new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
11104         if (!new_fltr)
11105                 return -ENOMEM;
11106
11107         fkeys = &new_fltr->fkeys;
11108         if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
11109                 rc = -EPROTONOSUPPORT;
11110                 goto err_free;
11111         }
11112
11113         if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
11114              fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
11115             ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
11116              (fkeys->basic.ip_proto != IPPROTO_UDP))) {
11117                 rc = -EPROTONOSUPPORT;
11118                 goto err_free;
11119         }
11120         if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
11121             bp->hwrm_spec_code < 0x10601) {
11122                 rc = -EPROTONOSUPPORT;
11123                 goto err_free;
11124         }
11125         if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
11126             bp->hwrm_spec_code < 0x10601) {
11127                 rc = -EPROTONOSUPPORT;
11128                 goto err_free;
11129         }
11130
11131         memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
11132         memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
11133
11134         idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
11135         head = &bp->ntp_fltr_hash_tbl[idx];
11136         rcu_read_lock();
11137         hlist_for_each_entry_rcu(fltr, head, hash) {
11138                 if (bnxt_fltr_match(fltr, new_fltr)) {
11139                         rcu_read_unlock();
11140                         rc = 0;
11141                         goto err_free;
11142                 }
11143         }
11144         rcu_read_unlock();
11145
11146         spin_lock_bh(&bp->ntp_fltr_lock);
11147         bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
11148                                          BNXT_NTP_FLTR_MAX_FLTR, 0);
11149         if (bit_id < 0) {
11150                 spin_unlock_bh(&bp->ntp_fltr_lock);
11151                 rc = -ENOMEM;
11152                 goto err_free;
11153         }
11154
11155         new_fltr->sw_id = (u16)bit_id;
11156         new_fltr->flow_id = flow_id;
11157         new_fltr->l2_fltr_idx = l2_idx;
11158         new_fltr->rxq = rxq_index;
11159         hlist_add_head_rcu(&new_fltr->hash, head);
11160         bp->ntp_fltr_count++;
11161         spin_unlock_bh(&bp->ntp_fltr_lock);
11162
11163         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11164         bnxt_queue_sp_work(bp);
11165
11166         return new_fltr->sw_id;
11167
11168 err_free:
11169         kfree(new_fltr);
11170         return rc;
11171 }
11172
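/* sp_task handler for aRFS filters: program newly added filters into the
 * firmware and free the ones whose flows have expired.
 */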
11173 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11174 {
11175         int i;
11176
11177         for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
11178                 struct hlist_head *head;
11179                 struct hlist_node *tmp;
11180                 struct bnxt_ntuple_filter *fltr;
11181                 int rc;
11182
11183                 head = &bp->ntp_fltr_hash_tbl[i];
11184                 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
11185                         bool del = false;
11186
11187                         if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
11188                                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
11189                                                         fltr->flow_id,
11190                                                         fltr->sw_id)) {
11191                                         bnxt_hwrm_cfa_ntuple_filter_free(bp,
11192                                                                          fltr);
11193                                         del = true;
11194                                 }
11195                         } else {
11196                                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
11197                                                                        fltr);
11198                                 if (rc)
11199                                         del = true;
11200                                 else
11201                                         set_bit(BNXT_FLTR_VALID, &fltr->state);
11202                         }
11203
11204                         if (del) {
11205                                 spin_lock_bh(&bp->ntp_fltr_lock);
11206                                 hlist_del_rcu(&fltr->hash);
11207                                 bp->ntp_fltr_count--;
11208                                 spin_unlock_bh(&bp->ntp_fltr_lock);
11209                                 synchronize_rcu();
11210                                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
11211                                 kfree(fltr);
11212                         }
11213                 }
11214         }
11215         if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
11216                 netdev_info(bp->dev, "Received PF driver unload event!\n");
11217 }
11218
11219 #else
11220
11221 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
11222 {
11223 }
11224
11225 #endif /* CONFIG_RFS_ACCEL */
11226
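/* .ndo_udp_tunnel_add handler: track the VXLAN or GENEVE UDP port (one
 * port per tunnel type) and schedule the sp_task to program the port into
 * the firmware when the first user adds it.
 */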
11227 static void bnxt_udp_tunnel_add(struct net_device *dev,
11228                                 struct udp_tunnel_info *ti)
11229 {
11230         struct bnxt *bp = netdev_priv(dev);
11231
11232         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
11233                 return;
11234
11235         if (!netif_running(dev))
11236                 return;
11237
11238         switch (ti->type) {
11239         case UDP_TUNNEL_TYPE_VXLAN:
11240                 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
11241                         return;
11242
11243                 bp->vxlan_port_cnt++;
11244                 if (bp->vxlan_port_cnt == 1) {
11245                         bp->vxlan_port = ti->port;
11246                         set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
11247                         bnxt_queue_sp_work(bp);
11248                 }
11249                 break;
11250         case UDP_TUNNEL_TYPE_GENEVE:
11251                 if (bp->nge_port_cnt && bp->nge_port != ti->port)
11252                         return;
11253
11254                 bp->nge_port_cnt++;
11255                 if (bp->nge_port_cnt == 1) {
11256                         bp->nge_port = ti->port;
11257                         set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
11258                 }
11259                 break;
11260         default:
11261                 return;
11262         }
11263
11264         bnxt_queue_sp_work(bp);
11265 }
11266
11267 static void bnxt_udp_tunnel_del(struct net_device *dev,
11268                                 struct udp_tunnel_info *ti)
11269 {
11270         struct bnxt *bp = netdev_priv(dev);
11271
11272         if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
11273                 return;
11274
11275         if (!netif_running(dev))
11276                 return;
11277
11278         switch (ti->type) {
11279         case UDP_TUNNEL_TYPE_VXLAN:
11280                 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
11281                         return;
11282                 bp->vxlan_port_cnt--;
11283
11284                 if (bp->vxlan_port_cnt != 0)
11285                         return;
11286
11287                 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
11288                 break;
11289         case UDP_TUNNEL_TYPE_GENEVE:
11290                 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
11291                         return;
11292                 bp->nge_port_cnt--;
11293
11294                 if (bp->nge_port_cnt != 0)
11295                         return;
11296
11297                 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
11298                 break;
11299         default:
11300                 return;
11301         }
11302
11303         bnxt_queue_sp_work(bp);
11304 }
11305
11306 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
11307                                struct net_device *dev, u32 filter_mask,
11308                                int nlflags)
11309 {
11310         struct bnxt *bp = netdev_priv(dev);
11311
11312         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
11313                                        nlflags, filter_mask, NULL);
11314 }
11315
11316 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
11317                                u16 flags, struct netlink_ext_ack *extack)
11318 {
11319         struct bnxt *bp = netdev_priv(dev);
11320         struct nlattr *attr, *br_spec;
11321         int rem, rc = 0;
11322
11323         if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
11324                 return -EOPNOTSUPP;
11325
11326         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
11327         if (!br_spec)
11328                 return -EINVAL;
11329
11330         nla_for_each_nested(attr, br_spec, rem) {
11331                 u16 mode;
11332
11333                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
11334                         continue;
11335
11336                 if (nla_len(attr) < sizeof(mode))
11337                         return -EINVAL;
11338
11339                 mode = nla_get_u16(attr);
11340                 if (mode == bp->br_mode)
11341                         break;
11342
11343                 rc = bnxt_hwrm_set_br_mode(bp, mode);
11344                 if (!rc)
11345                         bp->br_mode = mode;
11346                 break;
11347         }
11348         return rc;
11349 }
11350
11351 int bnxt_get_port_parent_id(struct net_device *dev,
11352                             struct netdev_phys_item_id *ppid)
11353 {
11354         struct bnxt *bp = netdev_priv(dev);
11355
11356         if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
11357                 return -EOPNOTSUPP;
11358
11359         /* The PF and its VF-reps only support the switchdev framework */
11360         if (!BNXT_PF(bp))
11361                 return -EOPNOTSUPP;
11362
11363         ppid->id_len = sizeof(bp->switch_id);
11364         memcpy(ppid->id, bp->switch_id, ppid->id_len);
11365
11366         return 0;
11367 }
11368
11369 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
11370 {
11371         struct bnxt *bp = netdev_priv(dev);
11372
11373         return &bp->dl_port;
11374 }
11375
11376 static const struct net_device_ops bnxt_netdev_ops = {
11377         .ndo_open               = bnxt_open,
11378         .ndo_start_xmit         = bnxt_start_xmit,
11379         .ndo_stop               = bnxt_close,
11380         .ndo_get_stats64        = bnxt_get_stats64,
11381         .ndo_set_rx_mode        = bnxt_set_rx_mode,
11382         .ndo_do_ioctl           = bnxt_ioctl,
11383         .ndo_validate_addr      = eth_validate_addr,
11384         .ndo_set_mac_address    = bnxt_change_mac_addr,
11385         .ndo_change_mtu         = bnxt_change_mtu,
11386         .ndo_fix_features       = bnxt_fix_features,
11387         .ndo_set_features       = bnxt_set_features,
11388         .ndo_tx_timeout         = bnxt_tx_timeout,
11389 #ifdef CONFIG_BNXT_SRIOV
11390         .ndo_get_vf_config      = bnxt_get_vf_config,
11391         .ndo_set_vf_mac         = bnxt_set_vf_mac,
11392         .ndo_set_vf_vlan        = bnxt_set_vf_vlan,
11393         .ndo_set_vf_rate        = bnxt_set_vf_bw,
11394         .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
11395         .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
11396         .ndo_set_vf_trust       = bnxt_set_vf_trust,
11397 #endif
11398         .ndo_setup_tc           = bnxt_setup_tc,
11399 #ifdef CONFIG_RFS_ACCEL
11400         .ndo_rx_flow_steer      = bnxt_rx_flow_steer,
11401 #endif
11402         .ndo_udp_tunnel_add     = bnxt_udp_tunnel_add,
11403         .ndo_udp_tunnel_del     = bnxt_udp_tunnel_del,
11404         .ndo_bpf                = bnxt_xdp,
11405         .ndo_xdp_xmit           = bnxt_xdp_xmit,
11406         .ndo_bridge_getlink     = bnxt_bridge_getlink,
11407         .ndo_bridge_setlink     = bnxt_bridge_setlink,
11408         .ndo_get_devlink_port   = bnxt_get_devlink_port,
11409 };
11410
11411 static void bnxt_remove_one(struct pci_dev *pdev)
11412 {
11413         struct net_device *dev = pci_get_drvdata(pdev);
11414         struct bnxt *bp = netdev_priv(dev);
11415
11416         if (BNXT_PF(bp)) {
11417                 bnxt_sriov_disable(bp);
11418                 bnxt_dl_unregister(bp);
11419         }
11420
11421         pci_disable_pcie_error_reporting(pdev);
11422         unregister_netdev(dev);
11423         bnxt_shutdown_tc(bp);
11424         bnxt_cancel_sp_work(bp);
11425         bp->sp_event = 0;
11426
11427         bnxt_clear_int_mode(bp);
11428         bnxt_hwrm_func_drv_unrgtr(bp);
11429         bnxt_free_hwrm_resources(bp);
11430         bnxt_free_hwrm_short_cmd_req(bp);
11431         bnxt_ethtool_free(bp);
11432         bnxt_dcb_free(bp);
11433         kfree(bp->edev);
11434         bp->edev = NULL;
11435         kfree(bp->fw_health);
11436         bp->fw_health = NULL;
11437         bnxt_cleanup_pci(bp);
11438         bnxt_free_ctx_mem(bp);
11439         kfree(bp->ctx);
11440         bp->ctx = NULL;
11441         bnxt_free_port_stats(bp);
11442         free_netdev(dev);
11443 }
11444
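/* Query PHY capabilities and the current link state during probe.  When
 * fw_dflt is set, also initialize the ethtool link settings from the
 * firmware defaults.
 */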
11445 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
11446 {
11447         int rc = 0;
11448         struct bnxt_link_info *link_info = &bp->link_info;
11449
11450         rc = bnxt_hwrm_phy_qcaps(bp);
11451         if (rc) {
11452                 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
11453                            rc);
11454                 return rc;
11455         }
11456         rc = bnxt_update_link(bp, false);
11457         if (rc) {
11458                 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
11459                            rc);
11460                 return rc;
11461         }
11462
11463         /* Older firmware does not have supported_auto_speeds, so assume
11464          * that all supported speeds can be autonegotiated.
11465          */
11466         if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
11467                 link_info->support_auto_speeds = link_info->support_speeds;
11468
11469         if (!fw_dflt)
11470                 return 0;
11471
11472         bnxt_init_ethtool_link_settings(bp);
11473         return 0;
11474 }
11475
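/* Return the number of MSI-X vectors supported by the device, or 1 if
 * MSI-X is not available.
 */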
11476 static int bnxt_get_max_irq(struct pci_dev *pdev)
11477 {
11478         u16 ctrl;
11479
11480         if (!pdev->msix_cap)
11481                 return 1;
11482
11483         pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
11484         return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
11485 }
11486
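/* Compute the maximum usable RX, TX and completion rings from the
 * firmware-reported hardware resources, after subtracting the MSI-X
 * vectors and stat contexts set aside for the ULP.
 */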
11487 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
11488                                 int *max_cp)
11489 {
11490         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11491         int max_ring_grps = 0, max_irq;
11492
11493         *max_tx = hw_resc->max_tx_rings;
11494         *max_rx = hw_resc->max_rx_rings;
11495         *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
11496         max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
11497                         bnxt_get_ulp_msix_num(bp),
11498                         hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
11499         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11500                 *max_cp = min_t(int, *max_cp, max_irq);
11501         max_ring_grps = hw_resc->max_hw_ring_grps;
11502         if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
11503                 *max_cp -= 1;
11504                 *max_rx -= 2;
11505         }
11506         if (bp->flags & BNXT_FLAG_AGG_RINGS)
11507                 *max_rx >>= 1;
11508         if (bp->flags & BNXT_FLAG_CHIP_P5) {
11509                 bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
11510                 /* On P5 chips, the max_cp output parameter is the number of available NQs */
11511                 *max_cp = max_irq;
11512         }
11513         *max_rx = min_t(int, *max_rx, max_ring_grps);
11514 }
11515
11516 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
11517 {
11518         int rx, tx, cp;
11519
11520         _bnxt_get_max_rings(bp, &rx, &tx, &cp);
11521         *max_rx = rx;
11522         *max_tx = tx;
11523         if (!rx || !tx || !cp)
11524                 return -ENOMEM;
11525
11526         return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
11527 }
11528
11529 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
11530                                bool shared)
11531 {
11532         int rc;
11533
11534         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
11535         if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
11536                 /* Not enough rings, try disabling agg rings. */
11537                 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
11538                 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
11539                 if (rc) {
11540                         /* set BNXT_FLAG_AGG_RINGS back for consistency */
11541                         bp->flags |= BNXT_FLAG_AGG_RINGS;
11542                         return rc;
11543                 }
11544                 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
11545                 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11546                 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11547                 bnxt_set_ring_params(bp);
11548         }
11549
11550         if (bp->flags & BNXT_FLAG_ROCE_CAP) {
11551                 int max_cp, max_stat, max_irq;
11552
11553                 /* Reserve minimum resources for RoCE */
11554                 max_cp = bnxt_get_max_func_cp_rings(bp);
11555                 max_stat = bnxt_get_max_func_stat_ctxs(bp);
11556                 max_irq = bnxt_get_max_func_irqs(bp);
11557                 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
11558                     max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
11559                     max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
11560                         return 0;
11561
11562                 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
11563                 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
11564                 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
11565                 max_cp = min_t(int, max_cp, max_irq);
11566                 max_cp = min_t(int, max_cp, max_stat);
11567                 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
11568                 if (rc)
11569                         rc = 0;
11570         }
11571         return rc;
11572 }
11573
11574 /* In the initial default shared ring setting, each shared ring must have an
11575  * RX/TX ring pair.
11576  */
11577 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
11578 {
11579         bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
11580         bp->rx_nr_rings = bp->cp_nr_rings;
11581         bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
11582         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11583 }
11584
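/* Choose the default ring counts at probe time: start from the default
 * RSS queue count (1 in a kdump kernel), scale down on multi-port cards,
 * cap by what the hardware supports, then reserve the rings with the
 * firmware.
 */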
11585 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
11586 {
11587         int dflt_rings, max_rx_rings, max_tx_rings, rc;
11588
11589         if (!bnxt_can_reserve_rings(bp))
11590                 return 0;
11591
11592         if (sh)
11593                 bp->flags |= BNXT_FLAG_SHARED_RINGS;
11594         dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
11595         /* Reduce default rings on multi-port cards so that total default
11596          * rings do not exceed CPU count.
11597          */
11598         if (bp->port_count > 1) {
11599                 int max_rings =
11600                         max_t(int, num_online_cpus() / bp->port_count, 1);
11601
11602                 dflt_rings = min_t(int, dflt_rings, max_rings);
11603         }
11604         rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
11605         if (rc)
11606                 return rc;
11607         bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
11608         bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
11609         if (sh)
11610                 bnxt_trim_dflt_sh_rings(bp);
11611         else
11612                 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
11613         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
11614
11615         rc = __bnxt_reserve_rings(bp);
11616         if (rc)
11617                 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
11618         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11619         if (sh)
11620                 bnxt_trim_dflt_sh_rings(bp);
11621
11622         /* Rings may have been trimmed, re-reserve the trimmed rings. */
11623         if (bnxt_need_reserve_rings(bp)) {
11624                 rc = __bnxt_reserve_rings(bp);
11625                 if (rc)
11626                         netdev_warn(bp->dev, "2nd rings reservation failed.\n");
11627                 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11628         }
11629         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
11630                 bp->rx_nr_rings++;
11631                 bp->cp_nr_rings++;
11632         }
11633         return rc;
11634 }
11635
11636 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
11637 {
11638         int rc;
11639
11640         if (bp->tx_nr_rings)
11641                 return 0;
11642
11643         bnxt_ulp_irq_stop(bp);
11644         bnxt_clear_int_mode(bp);
11645         rc = bnxt_set_dflt_rings(bp, true);
11646         if (rc) {
11647                 netdev_err(bp->dev, "Not enough rings available.\n");
11648                 goto init_dflt_ring_err;
11649         }
11650         rc = bnxt_init_int_mode(bp);
11651         if (rc)
11652                 goto init_dflt_ring_err;
11653
11654         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11655         if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
11656                 bp->flags |= BNXT_FLAG_RFS;
11657                 bp->dev->features |= NETIF_F_NTUPLE;
11658         }
11659 init_dflt_ring_err:
11660         bnxt_ulp_irq_restart(bp, rc);
11661         return rc;
11662 }
11663
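/* Re-query function capabilities and re-initialize the interrupt mode
 * after firmware resources have changed; the netdev is closed and
 * reopened if it was running.  Must be called with rtnl_lock held.
 */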
11664 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
11665 {
11666         int rc;
11667
11668         ASSERT_RTNL();
11669         bnxt_hwrm_func_qcaps(bp);
11670
11671         if (netif_running(bp->dev))
11672                 __bnxt_close_nic(bp, true, false);
11673
11674         bnxt_ulp_irq_stop(bp);
11675         bnxt_clear_int_mode(bp);
11676         rc = bnxt_init_int_mode(bp);
11677         bnxt_ulp_irq_restart(bp, rc);
11678
11679         if (netif_running(bp->dev)) {
11680                 if (rc)
11681                         dev_close(bp->dev);
11682                 else
11683                         rc = bnxt_open_nic(bp, true, false);
11684         }
11685
11686         return rc;
11687 }
11688
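/* Set the initial MAC address: the PF uses the firmware-assigned MAC.  A
 * VF uses the admin-assigned MAC if one is valid, otherwise a random
 * address, and then asks for approval of the address it picked.
 */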
11689 static int bnxt_init_mac_addr(struct bnxt *bp)
11690 {
11691         int rc = 0;
11692
11693         if (BNXT_PF(bp)) {
11694                 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
11695         } else {
11696 #ifdef CONFIG_BNXT_SRIOV
11697                 struct bnxt_vf_info *vf = &bp->vf;
11698                 bool strict_approval = true;
11699
11700                 if (is_valid_ether_addr(vf->mac_addr)) {
11701                         /* overwrite netdev dev_addr with admin VF MAC */
11702                         memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
11703                         /* Older PF driver or firmware may not approve this
11704                          * correctly.
11705                          */
11706                         strict_approval = false;
11707                 } else {
11708                         eth_hw_addr_random(bp->dev);
11709                 }
11710                 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
11711 #endif
11712         }
11713         return rc;
11714 }
11715
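/* Read the 8-byte Device Serial Number from the PCIe DSN extended
 * capability into @dsn in little-endian byte order.
 */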
11716 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
11717 {
11718         struct pci_dev *pdev = bp->pdev;
11719         int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
11720         u32 dw;
11721
11722         if (!pos) {
11723                 netdev_info(bp->dev, "Unable to read adapter's DSN\n");
11724                 return -EOPNOTSUPP;
11725         }
11726
11727         /* DSN (two dw) is at an offset of 4 from the cap pos */
11728         pos += 4;
11729         pci_read_config_dword(pdev, pos, &dw);
11730         put_unaligned_le32(dw, &dsn[0]);
11731         pci_read_config_dword(pdev, pos + 4, &dw);
11732         put_unaligned_le32(dw, &dsn[4]);
11733         return 0;
11734 }
11735
11736 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
11737 {
11738         static int version_printed;
11739         struct net_device *dev;
11740         struct bnxt *bp;
11741         int rc, max_irqs;
11742
11743         if (pci_is_bridge(pdev))
11744                 return -ENODEV;
11745
11746         if (version_printed++ == 0)
11747                 pr_info("%s", version);
11748
11749         max_irqs = bnxt_get_max_irq(pdev);
11750         dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
11751         if (!dev)
11752                 return -ENOMEM;
11753
11754         bp = netdev_priv(dev);
11755         bnxt_set_max_func_irqs(bp, max_irqs);
11756
11757         if (bnxt_vf_pciid(ent->driver_data))
11758                 bp->flags |= BNXT_FLAG_VF;
11759
11760         if (pdev->msix_cap)
11761                 bp->flags |= BNXT_FLAG_MSIX_CAP;
11762
11763         rc = bnxt_init_board(pdev, dev);
11764         if (rc < 0)
11765                 goto init_err_free;
11766
11767         dev->netdev_ops = &bnxt_netdev_ops;
11768         dev->watchdog_timeo = BNXT_TX_TIMEOUT;
11769         dev->ethtool_ops = &bnxt_ethtool_ops;
11770         pci_set_drvdata(pdev, dev);
11771
11772         rc = bnxt_alloc_hwrm_resources(bp);
11773         if (rc)
11774                 goto init_err_pci_clean;
11775
11776         mutex_init(&bp->hwrm_cmd_lock);
11777         mutex_init(&bp->link_lock);
11778
11779         rc = bnxt_fw_init_one_p1(bp);
11780         if (rc)
11781                 goto init_err_pci_clean;
11782
11783         if (BNXT_CHIP_P5(bp))
11784                 bp->flags |= BNXT_FLAG_CHIP_P5;
11785
11786         rc = bnxt_fw_init_one_p2(bp);
11787         if (rc)
11788                 goto init_err_pci_clean;
11789
11790         dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
11791                            NETIF_F_TSO | NETIF_F_TSO6 |
11792                            NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
11793                            NETIF_F_GSO_IPXIP4 |
11794                            NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
11795                            NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
11796                            NETIF_F_RXCSUM | NETIF_F_GRO;
11797
11798         if (BNXT_SUPPORTS_TPA(bp))
11799                 dev->hw_features |= NETIF_F_LRO;
11800
11801         dev->hw_enc_features =
11802                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
11803                         NETIF_F_TSO | NETIF_F_TSO6 |
11804                         NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
11805                         NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
11806                         NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
11807         dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
11808                                     NETIF_F_GSO_GRE_CSUM;
11809         dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
11810         dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
11811                             NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
11812         if (BNXT_SUPPORTS_TPA(bp))
11813                 dev->hw_features |= NETIF_F_GRO_HW;
11814         dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
11815         if (dev->features & NETIF_F_GRO_HW)
11816                 dev->features &= ~NETIF_F_LRO;
11817         dev->priv_flags |= IFF_UNICAST_FLT;
11818
11819 #ifdef CONFIG_BNXT_SRIOV
11820         init_waitqueue_head(&bp->sriov_cfg_wait);
11821         mutex_init(&bp->sriov_lock);
11822 #endif
11823         if (BNXT_SUPPORTS_TPA(bp)) {
11824                 bp->gro_func = bnxt_gro_func_5730x;
11825                 if (BNXT_CHIP_P4(bp))
11826                         bp->gro_func = bnxt_gro_func_5731x;
11827                 else if (BNXT_CHIP_P5(bp))
11828                         bp->gro_func = bnxt_gro_func_5750x;
11829         }
11830         if (!BNXT_CHIP_P4_PLUS(bp))
11831                 bp->flags |= BNXT_FLAG_DOUBLE_DB;
11832
11833         bp->ulp_probe = bnxt_ulp_probe;
11834
11835         rc = bnxt_init_mac_addr(bp);
11836         if (rc) {
11837                 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
11838                 rc = -EADDRNOTAVAIL;
11839                 goto init_err_pci_clean;
11840         }
11841
11842         if (BNXT_PF(bp)) {
11843                 /* Read the adapter's DSN to use as the eswitch switch_id */
11844                 rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
11845                 if (rc)
11846                         goto init_err_pci_clean;
11847         }
11848
11849         /* MTU range: 60 - FW defined max */
11850         dev->min_mtu = ETH_ZLEN;
11851         dev->max_mtu = bp->max_mtu;
11852
11853         rc = bnxt_probe_phy(bp, true);
11854         if (rc)
11855                 goto init_err_pci_clean;
11856
11857         bnxt_set_rx_skb_mode(bp, false);
11858         bnxt_set_tpa_flags(bp);
11859         bnxt_set_ring_params(bp);
11860         rc = bnxt_set_dflt_rings(bp, true);
11861         if (rc) {
11862                 netdev_err(bp->dev, "Not enough rings available.\n");
11863                 rc = -ENOMEM;
11864                 goto init_err_pci_clean;
11865         }
11866
11867         bnxt_fw_init_one_p3(bp);
11868
11869         if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
11870                 bp->flags |= BNXT_FLAG_STRIP_VLAN;
11871
11872         rc = bnxt_init_int_mode(bp);
11873         if (rc)
11874                 goto init_err_pci_clean;
11875
11876         /* No TC has been set yet and rings may have been trimmed due to
11877          * limited MSIX, so we re-initialize the TX rings per TC.
11878          */
11879         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
11880
11881         if (BNXT_PF(bp)) {
11882                 if (!bnxt_pf_wq) {
11883                         bnxt_pf_wq =
11884                                 create_singlethread_workqueue("bnxt_pf_wq");
11885                         if (!bnxt_pf_wq) {
11886                                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
                                rc = -ENOMEM;
11887                                 goto init_err_pci_clean;
11888                         }
11889                 }
11890                 bnxt_init_tc(bp);
11891         }
11892
11893         rc = register_netdev(dev);
11894         if (rc)
11895                 goto init_err_cleanup_tc;
11896
11897         if (BNXT_PF(bp))
11898                 bnxt_dl_register(bp);
11899
11900         netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
11901                     board_info[ent->driver_data].name,
11902                     (long)pci_resource_start(pdev, 0), dev->dev_addr);
11903         pcie_print_link_status(pdev);
11904
11905         return 0;
11906
11907 init_err_cleanup_tc:
11908         bnxt_shutdown_tc(bp);
11909         bnxt_clear_int_mode(bp);
11910
11911 init_err_pci_clean:
11912         bnxt_hwrm_func_drv_unrgtr(bp);
11913         bnxt_free_hwrm_short_cmd_req(bp);
11914         bnxt_free_hwrm_resources(bp);
11915         bnxt_free_ctx_mem(bp);
11916         kfree(bp->ctx);
11917         bp->ctx = NULL;
11918         kfree(bp->fw_health);
11919         bp->fw_health = NULL;
11920         bnxt_cleanup_pci(bp);
11921
11922 init_err_free:
11923         free_netdev(dev);
11924         return rc;
11925 }
11926
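/* PCI shutdown handler: close the netdev, shut down the ULP, and on
 * system power-off arm wake-on-LAN and put the device into D3hot.
 */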
11927 static void bnxt_shutdown(struct pci_dev *pdev)
11928 {
11929         struct net_device *dev = pci_get_drvdata(pdev);
11930         struct bnxt *bp;
11931
11932         if (!dev)
11933                 return;
11934
11935         rtnl_lock();
11936         bp = netdev_priv(dev);
11937         if (!bp)
11938                 goto shutdown_exit;
11939
11940         if (netif_running(dev))
11941                 dev_close(dev);
11942
11943         bnxt_ulp_shutdown(bp);
11944
11945         if (system_state == SYSTEM_POWER_OFF) {
11946                 bnxt_clear_int_mode(bp);
11947                 pci_disable_device(pdev);
11948                 pci_wake_from_d3(pdev, bp->wol);
11949                 pci_set_power_state(pdev, PCI_D3hot);
11950         }
11951
11952 shutdown_exit:
11953         rtnl_unlock();
11954 }
11955
11956 #ifdef CONFIG_PM_SLEEP
11957 static int bnxt_suspend(struct device *device)
11958 {
11959         struct net_device *dev = dev_get_drvdata(device);
11960         struct bnxt *bp = netdev_priv(dev);
11961         int rc = 0;
11962
11963         rtnl_lock();
11964         bnxt_ulp_stop(bp);
11965         if (netif_running(dev)) {
11966                 netif_device_detach(dev);
11967                 rc = bnxt_close(dev);
11968         }
11969         bnxt_hwrm_func_drv_unrgtr(bp);
11970         pci_disable_device(bp->pdev);
11971         bnxt_free_ctx_mem(bp);
11972         kfree(bp->ctx);
11973         bp->ctx = NULL;
11974         rtnl_unlock();
11975         return rc;
11976 }
11977
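/* System resume: re-enable the PCI device and redo the firmware handshake
 * (version query, function reset, queue and context setup, driver
 * registration) before reopening the netdev.
 */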
11978 static int bnxt_resume(struct device *device)
11979 {
11980         struct net_device *dev = dev_get_drvdata(device);
11981         struct bnxt *bp = netdev_priv(dev);
11982         int rc = 0;
11983
11984         rtnl_lock();
11985         rc = pci_enable_device(bp->pdev);
11986         if (rc) {
11987                 netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
11988                            rc);
11989                 goto resume_exit;
11990         }
11991         pci_set_master(bp->pdev);
11992         if (bnxt_hwrm_ver_get(bp)) {
11993                 rc = -ENODEV;
11994                 goto resume_exit;
11995         }
11996         rc = bnxt_hwrm_func_reset(bp);
11997         if (rc) {
11998                 rc = -EBUSY;
11999                 goto resume_exit;
12000         }
12001
12002         if (bnxt_hwrm_queue_qportcfg(bp)) {
12003                 rc = -ENODEV;
12004                 goto resume_exit;
12005         }
12006
12007         if (bp->hwrm_spec_code >= 0x10803) {
12008                 if (bnxt_alloc_ctx_mem(bp)) {
12009                         rc = -ENODEV;
12010                         goto resume_exit;
12011                 }
12012         }
12013         if (BNXT_NEW_RM(bp))
12014                 bnxt_hwrm_func_resc_qcaps(bp, false);
12015
12016         if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
12017                 rc = -ENODEV;
12018                 goto resume_exit;
12019         }
12020
12021         bnxt_get_wol_settings(bp);
12022         if (netif_running(dev)) {
12023                 rc = bnxt_open(dev);
12024                 if (!rc)
12025                         netif_device_attach(dev);
12026         }
12027
12028 resume_exit:
12029         bnxt_ulp_start(bp, rc);
12030         rtnl_unlock();
12031         return rc;
12032 }
12033
12034 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
12035 #define BNXT_PM_OPS (&bnxt_pm_ops)
12036
12037 #else
12038
12039 #define BNXT_PM_OPS NULL
12040
12041 #endif /* CONFIG_PM_SLEEP */
12042
12043 /**
12044  * bnxt_io_error_detected - called when PCI error is detected
12045  * @pdev: Pointer to PCI device
12046  * @state: The current pci connection state
12047  *
12048  * This function is called after a PCI bus error affecting
12049  * this device has been detected.
12050  */
12051 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
12052                                                pci_channel_state_t state)
12053 {
12054         struct net_device *netdev = pci_get_drvdata(pdev);
12055         struct bnxt *bp = netdev_priv(netdev);
12056
12057         netdev_info(netdev, "PCI I/O error detected\n");
12058
12059         rtnl_lock();
12060         netif_device_detach(netdev);
12061
12062         bnxt_ulp_stop(bp);
12063
12064         if (state == pci_channel_io_perm_failure) {
12065                 rtnl_unlock();
12066                 return PCI_ERS_RESULT_DISCONNECT;
12067         }
12068
12069         if (netif_running(netdev))
12070                 bnxt_close(netdev);
12071
12072         pci_disable_device(pdev);
12073         rtnl_unlock();
12074
12075         /* Request a slot reset. */
12076         return PCI_ERS_RESULT_NEED_RESET;
12077 }
12078
12079 /**
12080  * bnxt_io_slot_reset - called after the pci bus has been reset.
12081  * @pdev: Pointer to PCI device
12082  *
12083  * Restart the card from scratch, as if from a cold-boot.
12084  * At this point, the card has experienced a hard reset,
12085  * followed by fixups by BIOS, and has its config space
12086  * set up identically to what it was at cold boot.
12087  */
12088 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
12089 {
12090         struct net_device *netdev = pci_get_drvdata(pdev);
12091         struct bnxt *bp = netdev_priv(netdev);
12092         int err = 0;
12093         pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
12094
12095         netdev_info(bp->dev, "PCI Slot Reset\n");
12096
12097         rtnl_lock();
12098
12099         if (pci_enable_device(pdev)) {
12100                 dev_err(&pdev->dev,
12101                         "Cannot re-enable PCI device after reset.\n");
12102         } else {
12103                 pci_set_master(pdev);
12104
12105                 err = bnxt_hwrm_func_reset(bp);
12106                 if (!err && netif_running(netdev))
12107                         err = bnxt_open(netdev);
12108
12109                 if (!err)
12110                         result = PCI_ERS_RESULT_RECOVERED;
12111                 bnxt_ulp_start(bp, err);
12112         }
12113
12114         if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
12115                 dev_close(netdev);
12116
12117         rtnl_unlock();
12118
12119         return result;
12120 }
12121
12122 /**
12123  * bnxt_io_resume - called when traffic can start flowing again.
12124  * @pdev: Pointer to PCI device
12125  *
12126  * This callback is called when the error recovery driver tells
12127  * us that it's OK to resume normal operation.
12128  */
12129 static void bnxt_io_resume(struct pci_dev *pdev)
12130 {
12131         struct net_device *netdev = pci_get_drvdata(pdev);
12132
12133         rtnl_lock();
12134
12135         netif_device_attach(netdev);
12136
12137         rtnl_unlock();
12138 }
12139
12140 static const struct pci_error_handlers bnxt_err_handler = {
12141         .error_detected = bnxt_io_error_detected,
12142         .slot_reset     = bnxt_io_slot_reset,
12143         .resume         = bnxt_io_resume
12144 };
12145
12146 static struct pci_driver bnxt_pci_driver = {
12147         .name           = DRV_MODULE_NAME,
12148         .id_table       = bnxt_pci_tbl,
12149         .probe          = bnxt_init_one,
12150         .remove         = bnxt_remove_one,
12151         .shutdown       = bnxt_shutdown,
12152         .driver.pm      = BNXT_PM_OPS,
12153         .err_handler    = &bnxt_err_handler,
12154 #if defined(CONFIG_BNXT_SRIOV)
12155         .sriov_configure = bnxt_sriov_configure,
12156 #endif
12157 };
12158
12159 static int __init bnxt_init(void)
12160 {
12161         bnxt_debug_init();
12162         return pci_register_driver(&bnxt_pci_driver);
12163 }
12164
12165 static void __exit bnxt_exit(void)
12166 {
12167         pci_unregister_driver(&bnxt_pci_driver);
12168         if (bnxt_pf_wq)
12169                 destroy_workqueue(bnxt_pf_wq);
12170         bnxt_debug_exit();
12171 }
12172
12173 module_init(bnxt_init);
12174 module_exit(bnxt_exit);